xref: /linux/drivers/gpu/drm/i915/display/intel_dpll.c (revision a4871e6201c46c8e1d04308265b4b4c5753c8209)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2020 Intel Corporation
4  */
5 
6 #include <linux/kernel.h>
7 #include <linux/string_helpers.h>
8 
9 #include "i915_drv.h"
10 #include "i915_reg.h"
11 #include "intel_atomic.h"
12 #include "intel_crtc.h"
13 #include "intel_cx0_phy.h"
14 #include "intel_de.h"
15 #include "intel_display.h"
16 #include "intel_display_types.h"
17 #include "intel_dpio_phy.h"
18 #include "intel_dpll.h"
19 #include "intel_lvds.h"
20 #include "intel_lvds_regs.h"
21 #include "intel_panel.h"
22 #include "intel_pps.h"
23 #include "intel_snps_phy.h"
24 #include "vlv_dpio_phy_regs.h"
25 #include "vlv_sideband.h"
26 
27 struct intel_dpll_funcs {
28 	int (*crtc_compute_clock)(struct intel_atomic_state *state,
29 				  struct intel_crtc *crtc);
30 	int (*crtc_get_shared_dpll)(struct intel_atomic_state *state,
31 				    struct intel_crtc *crtc);
32 };
33 
34 struct intel_limit {
35 	struct {
36 		int min, max;
37 	} dot, vco, n, m, m1, m2, p, p1;
38 
39 	struct {
40 		int dot_limit;
41 		int p2_slow, p2_fast;
42 	} p2;
43 };
44 static const struct intel_limit intel_limits_i8xx_dac = {
45 	.dot = { .min = 25000, .max = 350000 },
46 	.vco = { .min = 908000, .max = 1512000 },
47 	.n = { .min = 2, .max = 16 },
48 	.m = { .min = 96, .max = 140 },
49 	.m1 = { .min = 18, .max = 26 },
50 	.m2 = { .min = 6, .max = 16 },
51 	.p = { .min = 4, .max = 128 },
52 	.p1 = { .min = 2, .max = 33 },
53 	.p2 = { .dot_limit = 165000,
54 		.p2_slow = 4, .p2_fast = 2 },
55 };
56 
57 static const struct intel_limit intel_limits_i8xx_dvo = {
58 	.dot = { .min = 25000, .max = 350000 },
59 	.vco = { .min = 908000, .max = 1512000 },
60 	.n = { .min = 2, .max = 16 },
61 	.m = { .min = 96, .max = 140 },
62 	.m1 = { .min = 18, .max = 26 },
63 	.m2 = { .min = 6, .max = 16 },
64 	.p = { .min = 4, .max = 128 },
65 	.p1 = { .min = 2, .max = 33 },
66 	.p2 = { .dot_limit = 165000,
67 		.p2_slow = 4, .p2_fast = 4 },
68 };
69 
70 static const struct intel_limit intel_limits_i8xx_lvds = {
71 	.dot = { .min = 25000, .max = 350000 },
72 	.vco = { .min = 908000, .max = 1512000 },
73 	.n = { .min = 2, .max = 16 },
74 	.m = { .min = 96, .max = 140 },
75 	.m1 = { .min = 18, .max = 26 },
76 	.m2 = { .min = 6, .max = 16 },
77 	.p = { .min = 4, .max = 128 },
78 	.p1 = { .min = 1, .max = 6 },
79 	.p2 = { .dot_limit = 165000,
80 		.p2_slow = 14, .p2_fast = 7 },
81 };
82 
83 static const struct intel_limit intel_limits_i9xx_sdvo = {
84 	.dot = { .min = 20000, .max = 400000 },
85 	.vco = { .min = 1400000, .max = 2800000 },
86 	.n = { .min = 1, .max = 6 },
87 	.m = { .min = 70, .max = 120 },
88 	.m1 = { .min = 8, .max = 18 },
89 	.m2 = { .min = 3, .max = 7 },
90 	.p = { .min = 5, .max = 80 },
91 	.p1 = { .min = 1, .max = 8 },
92 	.p2 = { .dot_limit = 200000,
93 		.p2_slow = 10, .p2_fast = 5 },
94 };
95 
96 static const struct intel_limit intel_limits_i9xx_lvds = {
97 	.dot = { .min = 20000, .max = 400000 },
98 	.vco = { .min = 1400000, .max = 2800000 },
99 	.n = { .min = 1, .max = 6 },
100 	.m = { .min = 70, .max = 120 },
101 	.m1 = { .min = 8, .max = 18 },
102 	.m2 = { .min = 3, .max = 7 },
103 	.p = { .min = 7, .max = 98 },
104 	.p1 = { .min = 1, .max = 8 },
105 	.p2 = { .dot_limit = 112000,
106 		.p2_slow = 14, .p2_fast = 7 },
107 };
108 
109 
110 static const struct intel_limit intel_limits_g4x_sdvo = {
111 	.dot = { .min = 25000, .max = 270000 },
112 	.vco = { .min = 1750000, .max = 3500000},
113 	.n = { .min = 1, .max = 4 },
114 	.m = { .min = 104, .max = 138 },
115 	.m1 = { .min = 17, .max = 23 },
116 	.m2 = { .min = 5, .max = 11 },
117 	.p = { .min = 10, .max = 30 },
118 	.p1 = { .min = 1, .max = 3},
119 	.p2 = { .dot_limit = 270000,
120 		.p2_slow = 10,
121 		.p2_fast = 10
122 	},
123 };
124 
125 static const struct intel_limit intel_limits_g4x_hdmi = {
126 	.dot = { .min = 22000, .max = 400000 },
127 	.vco = { .min = 1750000, .max = 3500000},
128 	.n = { .min = 1, .max = 4 },
129 	.m = { .min = 104, .max = 138 },
130 	.m1 = { .min = 16, .max = 23 },
131 	.m2 = { .min = 5, .max = 11 },
132 	.p = { .min = 5, .max = 80 },
133 	.p1 = { .min = 1, .max = 8},
134 	.p2 = { .dot_limit = 165000,
135 		.p2_slow = 10, .p2_fast = 5 },
136 };
137 
138 static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
139 	.dot = { .min = 20000, .max = 115000 },
140 	.vco = { .min = 1750000, .max = 3500000 },
141 	.n = { .min = 1, .max = 3 },
142 	.m = { .min = 104, .max = 138 },
143 	.m1 = { .min = 17, .max = 23 },
144 	.m2 = { .min = 5, .max = 11 },
145 	.p = { .min = 28, .max = 112 },
146 	.p1 = { .min = 2, .max = 8 },
147 	.p2 = { .dot_limit = 0,
148 		.p2_slow = 14, .p2_fast = 14
149 	},
150 };
151 
152 static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
153 	.dot = { .min = 80000, .max = 224000 },
154 	.vco = { .min = 1750000, .max = 3500000 },
155 	.n = { .min = 1, .max = 3 },
156 	.m = { .min = 104, .max = 138 },
157 	.m1 = { .min = 17, .max = 23 },
158 	.m2 = { .min = 5, .max = 11 },
159 	.p = { .min = 14, .max = 42 },
160 	.p1 = { .min = 2, .max = 6 },
161 	.p2 = { .dot_limit = 0,
162 		.p2_slow = 7, .p2_fast = 7
163 	},
164 };
165 
166 static const struct intel_limit pnv_limits_sdvo = {
167 	.dot = { .min = 20000, .max = 400000},
168 	.vco = { .min = 1700000, .max = 3500000 },
169 	/* Pineview's N counter is a ring counter */
170 	.n = { .min = 3, .max = 6 },
171 	.m = { .min = 2, .max = 256 },
172 	/* Pineview only has one combined m divider, which we treat as m2. */
173 	.m1 = { .min = 0, .max = 0 },
174 	.m2 = { .min = 0, .max = 254 },
175 	.p = { .min = 5, .max = 80 },
176 	.p1 = { .min = 1, .max = 8 },
177 	.p2 = { .dot_limit = 200000,
178 		.p2_slow = 10, .p2_fast = 5 },
179 };
180 
181 static const struct intel_limit pnv_limits_lvds = {
182 	.dot = { .min = 20000, .max = 400000 },
183 	.vco = { .min = 1700000, .max = 3500000 },
184 	.n = { .min = 3, .max = 6 },
185 	.m = { .min = 2, .max = 256 },
186 	.m1 = { .min = 0, .max = 0 },
187 	.m2 = { .min = 0, .max = 254 },
188 	.p = { .min = 7, .max = 112 },
189 	.p1 = { .min = 1, .max = 8 },
190 	.p2 = { .dot_limit = 112000,
191 		.p2_slow = 14, .p2_fast = 14 },
192 };
193 
194 /* Ironlake / Sandybridge
195  *
196  * We calculate clock using (register_value + 2) for N/M1/M2, so here
197  * the range value for them is (actual_value - 2).
198  */
199 static const struct intel_limit ilk_limits_dac = {
200 	.dot = { .min = 25000, .max = 350000 },
201 	.vco = { .min = 1760000, .max = 3510000 },
202 	.n = { .min = 1, .max = 5 },
203 	.m = { .min = 79, .max = 127 },
204 	.m1 = { .min = 12, .max = 22 },
205 	.m2 = { .min = 5, .max = 9 },
206 	.p = { .min = 5, .max = 80 },
207 	.p1 = { .min = 1, .max = 8 },
208 	.p2 = { .dot_limit = 225000,
209 		.p2_slow = 10, .p2_fast = 5 },
210 };
211 
212 static const struct intel_limit ilk_limits_single_lvds = {
213 	.dot = { .min = 25000, .max = 350000 },
214 	.vco = { .min = 1760000, .max = 3510000 },
215 	.n = { .min = 1, .max = 3 },
216 	.m = { .min = 79, .max = 118 },
217 	.m1 = { .min = 12, .max = 22 },
218 	.m2 = { .min = 5, .max = 9 },
219 	.p = { .min = 28, .max = 112 },
220 	.p1 = { .min = 2, .max = 8 },
221 	.p2 = { .dot_limit = 225000,
222 		.p2_slow = 14, .p2_fast = 14 },
223 };
224 
225 static const struct intel_limit ilk_limits_dual_lvds = {
226 	.dot = { .min = 25000, .max = 350000 },
227 	.vco = { .min = 1760000, .max = 3510000 },
228 	.n = { .min = 1, .max = 3 },
229 	.m = { .min = 79, .max = 127 },
230 	.m1 = { .min = 12, .max = 22 },
231 	.m2 = { .min = 5, .max = 9 },
232 	.p = { .min = 14, .max = 56 },
233 	.p1 = { .min = 2, .max = 8 },
234 	.p2 = { .dot_limit = 225000,
235 		.p2_slow = 7, .p2_fast = 7 },
236 };
237 
238 /* LVDS 100MHz refclk limits. */
239 static const struct intel_limit ilk_limits_single_lvds_100m = {
240 	.dot = { .min = 25000, .max = 350000 },
241 	.vco = { .min = 1760000, .max = 3510000 },
242 	.n = { .min = 1, .max = 2 },
243 	.m = { .min = 79, .max = 126 },
244 	.m1 = { .min = 12, .max = 22 },
245 	.m2 = { .min = 5, .max = 9 },
246 	.p = { .min = 28, .max = 112 },
247 	.p1 = { .min = 2, .max = 8 },
248 	.p2 = { .dot_limit = 225000,
249 		.p2_slow = 14, .p2_fast = 14 },
250 };
251 
252 static const struct intel_limit ilk_limits_dual_lvds_100m = {
253 	.dot = { .min = 25000, .max = 350000 },
254 	.vco = { .min = 1760000, .max = 3510000 },
255 	.n = { .min = 1, .max = 3 },
256 	.m = { .min = 79, .max = 126 },
257 	.m1 = { .min = 12, .max = 22 },
258 	.m2 = { .min = 5, .max = 9 },
259 	.p = { .min = 14, .max = 42 },
260 	.p1 = { .min = 2, .max = 6 },
261 	.p2 = { .dot_limit = 225000,
262 		.p2_slow = 7, .p2_fast = 7 },
263 };
264 
265 static const struct intel_limit intel_limits_vlv = {
266 	 /*
267 	  * These are based on the data rate limits (measured in fast clocks)
268 	  * since those are the strictest limits we have. The fast
269 	  * clock and actual rate limits are more relaxed, so checking
270 	  * them would make no difference.
271 	  */
272 	.dot = { .min = 25000, .max = 270000 },
273 	.vco = { .min = 4000000, .max = 6000000 },
274 	.n = { .min = 1, .max = 7 },
275 	.m1 = { .min = 2, .max = 3 },
276 	.m2 = { .min = 11, .max = 156 },
277 	.p1 = { .min = 2, .max = 3 },
278 	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
279 };
280 
281 static const struct intel_limit intel_limits_chv = {
282 	/*
283 	 * These are based on the data rate limits (measured in fast clocks)
284 	 * since those are the strictest limits we have.  The fast
285 	 * clock and actual rate limits are more relaxed, so checking
286 	 * them would make no difference.
287 	 */
288 	.dot = { .min = 25000, .max = 540000 },
289 	.vco = { .min = 4800000, .max = 6480000 },
290 	.n = { .min = 1, .max = 1 },
291 	.m1 = { .min = 2, .max = 2 },
292 	.m2 = { .min = 24 << 22, .max = 175 << 22 },
293 	.p1 = { .min = 2, .max = 4 },
294 	.p2 = {	.p2_slow = 1, .p2_fast = 14 },
295 };
296 
297 static const struct intel_limit intel_limits_bxt = {
298 	.dot = { .min = 25000, .max = 594000 },
299 	.vco = { .min = 4800000, .max = 6700000 },
300 	.n = { .min = 1, .max = 1 },
301 	.m1 = { .min = 2, .max = 2 },
302 	/* FIXME: find real m2 limits */
303 	.m2 = { .min = 2 << 22, .max = 255 << 22 },
304 	.p1 = { .min = 2, .max = 4 },
305 	.p2 = { .p2_slow = 1, .p2_fast = 20 },
306 };
307 
308 /*
309  * Platform specific helpers to calculate the port PLL loopback- (clock.m),
310  * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
311  * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
312  * The helpers' return value is the rate of the clock that is fed to the
313  * display engine's pipe which can be the above fast dot clock rate or a
314  * divided-down version of it.
315  */
316 /* m1 is reserved as 0 in Pineview, n is a ring counter */
317 static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
318 {
319 	clock->m = clock->m2 + 2;
320 	clock->p = clock->p1 * clock->p2;
321 
322 	clock->vco = clock->n == 0 ? 0 :
323 		DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
324 	clock->dot = clock->p == 0 ? 0 :
325 		DIV_ROUND_CLOSEST(clock->vco, clock->p);
326 
327 	return clock->dot;
328 }
329 
330 static u32 i9xx_dpll_compute_m(const struct dpll *dpll)
331 {
332 	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
333 }
334 
335 int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
336 {
337 	clock->m = i9xx_dpll_compute_m(clock);
338 	clock->p = clock->p1 * clock->p2;
339 
340 	clock->vco = clock->n + 2 == 0 ? 0 :
341 		DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
342 	clock->dot = clock->p == 0 ? 0 :
343 		DIV_ROUND_CLOSEST(clock->vco, clock->p);
344 
345 	return clock->dot;
346 }
347 
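/*
 * Illustrative example only (hypothetical divisor values, not taken from
 * bspec or any real platform programming): with refclk = 96000 kHz and
 * n = 2, m1 = 14, m2 = 7, p1 = 2, p2 = 10, the i9xx formulas above give
 *   m   = 5 * (14 + 2) + (7 + 2)  = 89
 *   vco = 96000 * 89 / (2 + 2)    = 2136000 kHz
 *   dot = 2136000 / (2 * 10)      = 106800 kHz
 * all of which fall inside intel_limits_i9xx_sdvo.
 */
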
348 static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
349 {
350 	clock->m = clock->m1 * clock->m2;
351 	clock->p = clock->p1 * clock->p2 * 5;
352 
353 	clock->vco = clock->n == 0 ? 0 :
354 		DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
355 	clock->dot = clock->p == 0 ? 0 :
356 		DIV_ROUND_CLOSEST(clock->vco, clock->p);
357 
358 	return clock->dot;
359 }
360 
361 int chv_calc_dpll_params(int refclk, struct dpll *clock)
362 {
363 	clock->m = clock->m1 * clock->m2;
364 	clock->p = clock->p1 * clock->p2 * 5;
365 
366 	clock->vco = clock->n == 0 ? 0 :
367 		DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m), clock->n << 22);
368 	clock->dot = clock->p == 0 ? 0 :
369 		DIV_ROUND_CLOSEST(clock->vco, clock->p);
370 
371 	return clock->dot;
372 }
373 
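/*
 * Note that on CHV the m2 divisor carries a 22 bit fractional part (see the
 * m2 limits in intel_limits_chv), hence the n << 22 in the vco calculation
 * above. Illustrative example only (hypothetical values): with
 * refclk = 100000 kHz, n = 1, m1 = 2, m2 = 30 << 22 (integer part 30, no
 * fraction), p1 = 2 and p2 = 14:
 *   vco = 100000 * 2 * 30 = 6000000 kHz
 *   dot = 6000000 / (2 * 14 * 5) = 42857 kHz
 */
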
374 static int i9xx_pll_refclk(const struct intel_crtc_state *crtc_state)
375 {
376 	struct intel_display *display = to_intel_display(crtc_state);
377 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
378 	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
379 
380 	if ((hw_state->dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
381 		return display->vbt.lvds_ssc_freq;
382 	else if (HAS_PCH_SPLIT(i915))
383 		return 120000;
384 	else if (DISPLAY_VER(display) != 2)
385 		return 96000;
386 	else
387 		return 48000;
388 }
389 
390 void i9xx_dpll_get_hw_state(struct intel_crtc *crtc,
391 			    struct intel_dpll_hw_state *dpll_hw_state)
392 {
393 	struct intel_display *display = to_intel_display(crtc);
394 	struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
395 
396 	if (DISPLAY_VER(display) >= 4) {
397 		u32 tmp;
398 
399 		/* No way to read it out on pipes B and C */
400 		if (display->platform.cherryview && crtc->pipe != PIPE_A)
401 			tmp = display->state.chv_dpll_md[crtc->pipe];
402 		else
403 			tmp = intel_de_read(display,
404 					    DPLL_MD(display, crtc->pipe));
405 
406 		hw_state->dpll_md = tmp;
407 	}
408 
409 	hw_state->dpll = intel_de_read(display, DPLL(display, crtc->pipe));
410 
411 	if (!display->platform.valleyview && !display->platform.cherryview) {
412 		hw_state->fp0 = intel_de_read(display, FP0(crtc->pipe));
413 		hw_state->fp1 = intel_de_read(display, FP1(crtc->pipe));
414 	} else {
415 		/* Mask out read-only status bits. */
416 		hw_state->dpll &= ~(DPLL_LOCK_VLV |
417 				    DPLL_PORTC_READY_MASK |
418 				    DPLL_PORTB_READY_MASK);
419 	}
420 }
421 
422 /* Returns the clock of the currently programmed mode of the given pipe. */
423 void i9xx_crtc_clock_get(struct intel_crtc_state *crtc_state)
424 {
425 	struct intel_display *display = to_intel_display(crtc_state);
426 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
427 	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
428 	u32 dpll = hw_state->dpll;
429 	u32 fp;
430 	struct dpll clock;
431 	int port_clock;
432 	int refclk = i9xx_pll_refclk(crtc_state);
433 
434 	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
435 		fp = hw_state->fp0;
436 	else
437 		fp = hw_state->fp1;
438 
439 	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
440 	if (display->platform.pineview) {
441 		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
442 		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
443 	} else {
444 		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
445 		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
446 	}
447 
448 	if (DISPLAY_VER(display) != 2) {
449 		if (display->platform.pineview)
450 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
451 				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
452 		else
453 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
454 			       DPLL_FPA01_P1_POST_DIV_SHIFT);
455 
456 		switch (dpll & DPLL_MODE_MASK) {
457 		case DPLLB_MODE_DAC_SERIAL:
458 			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
459 				5 : 10;
460 			break;
461 		case DPLLB_MODE_LVDS:
462 			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
463 				7 : 14;
464 			break;
465 		default:
466 			drm_dbg_kms(display->drm,
467 				    "Unknown DPLL mode %08x in programmed "
468 				    "mode\n", (int)(dpll & DPLL_MODE_MASK));
469 			return;
470 		}
471 
472 		if (display->platform.pineview)
473 			port_clock = pnv_calc_dpll_params(refclk, &clock);
474 		else
475 			port_clock = i9xx_calc_dpll_params(refclk, &clock);
476 	} else {
477 		enum pipe lvds_pipe;
478 
479 		if (display->platform.i85x &&
480 		    intel_lvds_port_enabled(display, LVDS, &lvds_pipe) &&
481 		    lvds_pipe == crtc->pipe) {
482 			u32 lvds = intel_de_read(display, LVDS);
483 
484 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
485 				       DPLL_FPA01_P1_POST_DIV_SHIFT);
486 
487 			if (lvds & LVDS_CLKB_POWER_UP)
488 				clock.p2 = 7;
489 			else
490 				clock.p2 = 14;
491 		} else {
492 			if (dpll & PLL_P1_DIVIDE_BY_TWO)
493 				clock.p1 = 2;
494 			else {
495 				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
496 					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
497 			}
498 			if (dpll & PLL_P2_DIVIDE_BY_4)
499 				clock.p2 = 4;
500 			else
501 				clock.p2 = 2;
502 		}
503 
504 		port_clock = i9xx_calc_dpll_params(refclk, &clock);
505 	}
506 
507 	/*
508 	 * This value includes pixel_multiplier. We will use
509 	 * port_clock to compute adjusted_mode.crtc_clock in the
510 	 * encoder's get_config() function.
511 	 */
512 	crtc_state->port_clock = port_clock;
513 }
514 
515 void vlv_crtc_clock_get(struct intel_crtc_state *crtc_state)
516 {
517 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
518 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
519 	enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
520 	enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
521 	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
522 	int refclk = 100000;
523 	struct dpll clock;
524 	u32 tmp;
525 
526 	/* In case of DSI, DPLL will not be used */
527 	if ((hw_state->dpll & DPLL_VCO_ENABLE) == 0)
528 		return;
529 
530 	vlv_dpio_get(dev_priv);
531 	tmp = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW3(ch));
532 	vlv_dpio_put(dev_priv);
533 
534 	clock.m1 = REG_FIELD_GET(DPIO_M1_DIV_MASK, tmp);
535 	clock.m2 = REG_FIELD_GET(DPIO_M2_DIV_MASK, tmp);
536 	clock.n = REG_FIELD_GET(DPIO_N_DIV_MASK, tmp);
537 	clock.p1 = REG_FIELD_GET(DPIO_P1_DIV_MASK, tmp);
538 	clock.p2 = REG_FIELD_GET(DPIO_P2_DIV_MASK, tmp);
539 
540 	crtc_state->port_clock = vlv_calc_dpll_params(refclk, &clock);
541 }
542 
543 void chv_crtc_clock_get(struct intel_crtc_state *crtc_state)
544 {
545 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
546 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
547 	enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
548 	enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
549 	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
550 	struct dpll clock;
551 	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
552 	int refclk = 100000;
553 
554 	/* In case of DSI, DPLL will not be used */
555 	if ((hw_state->dpll & DPLL_VCO_ENABLE) == 0)
556 		return;
557 
558 	vlv_dpio_get(dev_priv);
559 	cmn_dw13 = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW13(ch));
560 	pll_dw0 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW0(ch));
561 	pll_dw1 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW1(ch));
562 	pll_dw2 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW2(ch));
563 	pll_dw3 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW3(ch));
564 	vlv_dpio_put(dev_priv);
565 
566 	clock.m1 = REG_FIELD_GET(DPIO_CHV_M1_DIV_MASK, pll_dw1) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
567 	clock.m2 = REG_FIELD_GET(DPIO_CHV_M2_DIV_MASK, pll_dw0) << 22;
568 	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
569 		clock.m2 |= REG_FIELD_GET(DPIO_CHV_M2_FRAC_DIV_MASK, pll_dw2);
570 	clock.n = REG_FIELD_GET(DPIO_CHV_N_DIV_MASK, pll_dw1);
571 	clock.p1 = REG_FIELD_GET(DPIO_CHV_P1_DIV_MASK, cmn_dw13);
572 	clock.p2 = REG_FIELD_GET(DPIO_CHV_P2_DIV_MASK, cmn_dw13);
573 
574 	crtc_state->port_clock = chv_calc_dpll_params(refclk, &clock);
575 }
576 
577 /*
578  * Returns whether the given set of divisors are valid for a given refclk with
579  * the given connectors.
580  */
581 static bool intel_pll_is_valid(struct intel_display *display,
582 			       const struct intel_limit *limit,
583 			       const struct dpll *clock)
584 {
585 	if (clock->n < limit->n.min || limit->n.max < clock->n)
586 		return false;
587 	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
588 		return false;
589 	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
590 		return false;
591 	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
592 		return false;
593 
594 	if (!display->platform.pineview &&
595 	    !display->platform.valleyview && !display->platform.cherryview &&
596 	    !display->platform.broxton && !display->platform.geminilake)
597 		if (clock->m1 <= clock->m2)
598 			return false;
599 
600 	if (!display->platform.valleyview && !display->platform.cherryview &&
601 	    !display->platform.broxton && !display->platform.geminilake) {
602 		if (clock->p < limit->p.min || limit->p.max < clock->p)
603 			return false;
604 		if (clock->m < limit->m.min || limit->m.max < clock->m)
605 			return false;
606 	}
607 
608 	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
609 		return false;
610 	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
611 	 * connector, etc., rather than just a single range.
612 	 */
613 	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
614 		return false;
615 
616 	return true;
617 }
618 
619 static int
620 i9xx_select_p2_div(const struct intel_limit *limit,
621 		   const struct intel_crtc_state *crtc_state,
622 		   int target)
623 {
624 	struct intel_display *display = to_intel_display(crtc_state);
625 
626 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
627 		/*
628 		 * For LVDS just rely on its current settings for dual-channel.
629 		 * We haven't figured out how to reliably set up different
630 		 * single/dual channel state, if we even can.
631 		 */
632 		if (intel_is_dual_link_lvds(display))
633 			return limit->p2.p2_fast;
634 		else
635 			return limit->p2.p2_slow;
636 	} else {
637 		if (target < limit->p2.dot_limit)
638 			return limit->p2.p2_slow;
639 		else
640 			return limit->p2.p2_fast;
641 	}
642 }
643 
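/*
 * Example of the p2 selection for non-LVDS outputs (hypothetical targets,
 * using intel_limits_i9xx_sdvo where dot_limit = 200000): a 106800 kHz
 * target is below the dot limit and selects p2_slow = 10, while a
 * 270000 kHz target is at or above it and selects p2_fast = 5.
 */
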
644 /*
645  * Returns a set of divisors for the desired target clock with the given
646  * refclk, or FALSE.
647  *
648  * Target and reference clocks are specified in kHz.
649  *
650  * If match_clock is provided, then best_clock P divider must match the P
651  * divider from @match_clock used for LVDS downclocking.
652  */
653 static bool
654 i9xx_find_best_dpll(const struct intel_limit *limit,
655 		    struct intel_crtc_state *crtc_state,
656 		    int target, int refclk,
657 		    const struct dpll *match_clock,
658 		    struct dpll *best_clock)
659 {
660 	struct intel_display *display = to_intel_display(crtc_state);
661 	struct dpll clock;
662 	int err = target;
663 
664 	memset(best_clock, 0, sizeof(*best_clock));
665 
666 	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
667 
668 	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
669 	     clock.m1++) {
670 		for (clock.m2 = limit->m2.min;
671 		     clock.m2 <= limit->m2.max; clock.m2++) {
672 			if (clock.m2 >= clock.m1)
673 				break;
674 			for (clock.n = limit->n.min;
675 			     clock.n <= limit->n.max; clock.n++) {
676 				for (clock.p1 = limit->p1.min;
677 					clock.p1 <= limit->p1.max; clock.p1++) {
678 					int this_err;
679 
680 					i9xx_calc_dpll_params(refclk, &clock);
681 					if (!intel_pll_is_valid(display,
682 								limit,
683 								&clock))
684 						continue;
685 					if (match_clock &&
686 					    clock.p != match_clock->p)
687 						continue;
688 
689 					this_err = abs(clock.dot - target);
690 					if (this_err < err) {
691 						*best_clock = clock;
692 						err = this_err;
693 					}
694 				}
695 			}
696 		}
697 	}
698 
699 	return (err != target);
700 }
701 
702 /*
703  * Returns a set of divisors for the desired target clock with the given
704  * refclk, or FALSE.
705  *
706  * Target and reference clocks are specified in kHz.
707  *
708  * If match_clock is provided, then best_clock P divider must match the P
709  * divider from @match_clock used for LVDS downclocking.
710  */
711 static bool
712 pnv_find_best_dpll(const struct intel_limit *limit,
713 		   struct intel_crtc_state *crtc_state,
714 		   int target, int refclk,
715 		   const struct dpll *match_clock,
716 		   struct dpll *best_clock)
717 {
718 	struct intel_display *display = to_intel_display(crtc_state);
719 	struct dpll clock;
720 	int err = target;
721 
722 	memset(best_clock, 0, sizeof(*best_clock));
723 
724 	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
725 
726 	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
727 	     clock.m1++) {
728 		for (clock.m2 = limit->m2.min;
729 		     clock.m2 <= limit->m2.max; clock.m2++) {
730 			for (clock.n = limit->n.min;
731 			     clock.n <= limit->n.max; clock.n++) {
732 				for (clock.p1 = limit->p1.min;
733 					clock.p1 <= limit->p1.max; clock.p1++) {
734 					int this_err;
735 
736 					pnv_calc_dpll_params(refclk, &clock);
737 					if (!intel_pll_is_valid(display,
738 								limit,
739 								&clock))
740 						continue;
741 					if (match_clock &&
742 					    clock.p != match_clock->p)
743 						continue;
744 
745 					this_err = abs(clock.dot - target);
746 					if (this_err < err) {
747 						*best_clock = clock;
748 						err = this_err;
749 					}
750 				}
751 			}
752 		}
753 	}
754 
755 	return (err != target);
756 }
757 
758 /*
759  * Returns a set of divisors for the desired target clock with the given
760  * refclk, or FALSE.
761  *
762  * Target and reference clocks are specified in kHz.
763  *
764  * If match_clock is provided, then best_clock P divider must match the P
765  * divider from @match_clock used for LVDS downclocking.
766  */
767 static bool
768 g4x_find_best_dpll(const struct intel_limit *limit,
769 		   struct intel_crtc_state *crtc_state,
770 		   int target, int refclk,
771 		   const struct dpll *match_clock,
772 		   struct dpll *best_clock)
773 {
774 	struct intel_display *display = to_intel_display(crtc_state);
775 	struct dpll clock;
776 	int max_n;
777 	bool found = false;
778 	/* approximately equals target * 0.00586 (1/256 + 1/512) */
779 	int err_most = (target >> 8) + (target >> 9);
780 
781 	memset(best_clock, 0, sizeof(*best_clock));
782 
783 	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
784 
785 	max_n = limit->n.max;
786 	/* based on hardware requirement, prefer smaller n to precision */
787 	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
788 		/* based on hardware requirement, prefer larger m1,m2 */
789 		for (clock.m1 = limit->m1.max;
790 		     clock.m1 >= limit->m1.min; clock.m1--) {
791 			for (clock.m2 = limit->m2.max;
792 			     clock.m2 >= limit->m2.min; clock.m2--) {
793 				for (clock.p1 = limit->p1.max;
794 				     clock.p1 >= limit->p1.min; clock.p1--) {
795 					int this_err;
796 
797 					i9xx_calc_dpll_params(refclk, &clock);
798 					if (!intel_pll_is_valid(display,
799 								limit,
800 								&clock))
801 						continue;
802 
803 					this_err = abs(clock.dot - target);
804 					if (this_err < err_most) {
805 						*best_clock = clock;
806 						err_most = this_err;
807 						max_n = clock.n;
808 						found = true;
809 					}
810 				}
811 			}
812 		}
813 	}
814 	return found;
815 }
816 
817 /*
818  * Check whether the calculated PLL configuration beats the best configuration
819  * and error found so far. The calculated error is returned via *error_ppm.
820  */
821 static bool vlv_PLL_is_optimal(struct intel_display *display, int target_freq,
822 			       const struct dpll *calculated_clock,
823 			       const struct dpll *best_clock,
824 			       unsigned int best_error_ppm,
825 			       unsigned int *error_ppm)
826 {
827 	/*
828 	 * For CHV ignore the error and consider only the P value.
829 	 * Prefer a bigger P value based on HW requirements.
830 	 */
831 	if (display->platform.cherryview) {
832 		*error_ppm = 0;
833 
834 		return calculated_clock->p > best_clock->p;
835 	}
836 
837 	if (drm_WARN_ON_ONCE(display->drm, !target_freq))
838 		return false;
839 
840 	*error_ppm = div_u64(1000000ULL *
841 				abs(target_freq - calculated_clock->dot),
842 			     target_freq);
843 	/*
844 	 * Prefer a better P value over a better (smaller) error if the error
845 	 * is small. Ensure this preference for future configurations too by
846 	 * setting the error to 0.
847 	 */
848 	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
849 		*error_ppm = 0;
850 
851 		return true;
852 	}
853 
854 	return *error_ppm + 10 < best_error_ppm;
855 }
856 
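/*
 * Example of the error metric above (hypothetical clocks): for a 270000 kHz
 * target and a calculated dot clock of 270135 kHz the error is
 * 1000000 * 135 / 270000 = 500 ppm. Had the error been under 100 ppm, a
 * configuration with a bigger P divider would be preferred even over one
 * with a smaller error.
 */
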
857 /*
858  * Returns a set of divisors for the desired target clock with the given
859  * refclk, or FALSE.
860  */
861 static bool
862 vlv_find_best_dpll(const struct intel_limit *limit,
863 		   struct intel_crtc_state *crtc_state,
864 		   int target, int refclk,
865 		   const struct dpll *match_clock,
866 		   struct dpll *best_clock)
867 {
868 	struct intel_display *display = to_intel_display(crtc_state);
869 	struct dpll clock;
870 	unsigned int bestppm = 1000000;
871 	/* min update 19.2 MHz */
872 	int max_n = min(limit->n.max, refclk / 19200);
873 	bool found = false;
874 
875 	memset(best_clock, 0, sizeof(*best_clock));
876 
877 	/* based on hardware requirement, prefer smaller n to precision */
878 	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
879 		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
880 			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
881 			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
882 				clock.p = clock.p1 * clock.p2 * 5;
883 				/* based on hardware requirement, prefer bigger m1,m2 values */
884 				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
885 					unsigned int ppm;
886 
887 					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
888 								     refclk * clock.m1);
889 
890 					vlv_calc_dpll_params(refclk, &clock);
891 
892 					if (!intel_pll_is_valid(display,
893 								limit,
894 								&clock))
895 						continue;
896 
897 					if (!vlv_PLL_is_optimal(display, target,
898 								&clock,
899 								best_clock,
900 								bestppm, &ppm))
901 						continue;
902 
903 					*best_clock = clock;
904 					bestppm = ppm;
905 					found = true;
906 				}
907 			}
908 		}
909 	}
910 
911 	return found;
912 }
913 
914 /*
915  * Returns a set of divisors for the desired target clock with the given
916  * refclk, or FALSE.
917  */
918 static bool
919 chv_find_best_dpll(const struct intel_limit *limit,
920 		   struct intel_crtc_state *crtc_state,
921 		   int target, int refclk,
922 		   const struct dpll *match_clock,
923 		   struct dpll *best_clock)
924 {
925 	struct intel_display *display = to_intel_display(crtc_state);
926 	unsigned int best_error_ppm;
927 	struct dpll clock;
928 	u64 m2;
929 	int found = false;
930 
931 	memset(best_clock, 0, sizeof(*best_clock));
932 	best_error_ppm = 1000000;
933 
934 	/*
935 	 * Based on the hardware docs, n is always set to 1 and m1 is always
936 	 * set to 2. If we ever need to support a 200 MHz refclk, we'll have to
937 	 * revisit this because n may no longer be 1.
938 	 */
939 	clock.n = 1;
940 	clock.m1 = 2;
941 
942 	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
943 		for (clock.p2 = limit->p2.p2_fast;
944 				clock.p2 >= limit->p2.p2_slow;
945 				clock.p2 -= clock.p2 > 10 ? 2 : 1) {
946 			unsigned int error_ppm;
947 
948 			clock.p = clock.p1 * clock.p2 * 5;
949 
950 			m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
951 						   refclk * clock.m1);
952 
953 			if (m2 > INT_MAX/clock.m1)
954 				continue;
955 
956 			clock.m2 = m2;
957 
958 			chv_calc_dpll_params(refclk, &clock);
959 
960 			if (!intel_pll_is_valid(display, limit, &clock))
961 				continue;
962 
963 			if (!vlv_PLL_is_optimal(display, target, &clock, best_clock,
964 						best_error_ppm, &error_ppm))
965 				continue;
966 
967 			*best_clock = clock;
968 			best_error_ppm = error_ppm;
969 			found = true;
970 		}
971 	}
972 
973 	return found;
974 }
975 
976 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
977 			struct dpll *best_clock)
978 {
979 	const struct intel_limit *limit = &intel_limits_bxt;
980 	int refclk = 100000;
981 
982 	return chv_find_best_dpll(limit, crtc_state,
983 				  crtc_state->port_clock, refclk,
984 				  NULL, best_clock);
985 }
986 
987 u32 i9xx_dpll_compute_fp(const struct dpll *dpll)
988 {
989 	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
990 }
991 
992 static u32 pnv_dpll_compute_fp(const struct dpll *dpll)
993 {
994 	return (1 << dpll->n) << 16 | dpll->m2;
995 }
996 
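/*
 * Hypothetical encoding examples (divisor values chosen for illustration
 * only): i9xx_dpll_compute_fp() packs n = 2, m1 = 14, m2 = 7 as
 * 2 << 16 | 14 << 8 | 7 = 0x00020e07, while on Pineview the N field is
 * one-hot, so pnv_dpll_compute_fp() packs n = 3, m2 = 100 as
 * (1 << 3) << 16 | 100 = 0x00080064 (decoded again with ffs() in
 * i9xx_crtc_clock_get()).
 */
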
997 static u32 i965_dpll_md(const struct intel_crtc_state *crtc_state)
998 {
999 	return (crtc_state->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
1000 }
1001 
1002 static u32 i9xx_dpll(const struct intel_crtc_state *crtc_state,
1003 		     const struct dpll *clock,
1004 		     const struct dpll *reduced_clock)
1005 {
1006 	struct intel_display *display = to_intel_display(crtc_state);
1007 	u32 dpll;
1008 
1009 	dpll = DPLL_VCO_ENABLE | DPLL_VGA_MODE_DIS;
1010 
1011 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
1012 		dpll |= DPLLB_MODE_LVDS;
1013 	else
1014 		dpll |= DPLLB_MODE_DAC_SERIAL;
1015 
1016 	if (display->platform.i945g || display->platform.i945gm ||
1017 	    display->platform.g33 || display->platform.pineview) {
1018 		dpll |= (crtc_state->pixel_multiplier - 1)
1019 			<< SDVO_MULTIPLIER_SHIFT_HIRES;
1020 	}
1021 
1022 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
1023 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1024 		dpll |= DPLL_SDVO_HIGH_SPEED;
1025 
1026 	if (intel_crtc_has_dp_encoder(crtc_state))
1027 		dpll |= DPLL_SDVO_HIGH_SPEED;
1028 
1029 	/* compute bitmask from p1 value */
1030 	if (display->platform.g4x) {
1031 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
1032 		dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
1033 	} else if (display->platform.pineview) {
1034 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
1035 		WARN_ON(reduced_clock->p1 != clock->p1);
1036 	} else {
1037 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
1038 		WARN_ON(reduced_clock->p1 != clock->p1);
1039 	}
1040 
1041 	switch (clock->p2) {
1042 	case 5:
1043 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
1044 		break;
1045 	case 7:
1046 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
1047 		break;
1048 	case 10:
1049 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
1050 		break;
1051 	case 14:
1052 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
1053 		break;
1054 	}
1055 	WARN_ON(reduced_clock->p2 != clock->p2);
1056 
1057 	if (DISPLAY_VER(display) >= 4)
1058 		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
1059 
1060 	if (crtc_state->sdvo_tv_clock)
1061 		dpll |= PLL_REF_INPUT_TVCLKINBC;
1062 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
1063 		 intel_panel_use_ssc(display))
1064 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
1065 	else
1066 		dpll |= PLL_REF_INPUT_DREFCLK;
1067 
1068 	return dpll;
1069 }
1070 
1071 static void i9xx_compute_dpll(struct intel_crtc_state *crtc_state,
1072 			      const struct dpll *clock,
1073 			      const struct dpll *reduced_clock)
1074 {
1075 	struct intel_display *display = to_intel_display(crtc_state);
1076 	struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
1077 
1078 	if (display->platform.pineview) {
1079 		hw_state->fp0 = pnv_dpll_compute_fp(clock);
1080 		hw_state->fp1 = pnv_dpll_compute_fp(reduced_clock);
1081 	} else {
1082 		hw_state->fp0 = i9xx_dpll_compute_fp(clock);
1083 		hw_state->fp1 = i9xx_dpll_compute_fp(reduced_clock);
1084 	}
1085 
1086 	hw_state->dpll = i9xx_dpll(crtc_state, clock, reduced_clock);
1087 
1088 	if (DISPLAY_VER(display) >= 4)
1089 		hw_state->dpll_md = i965_dpll_md(crtc_state);
1090 }
1091 
1092 static u32 i8xx_dpll(const struct intel_crtc_state *crtc_state,
1093 		     const struct dpll *clock,
1094 		     const struct dpll *reduced_clock)
1095 {
1096 	struct intel_display *display = to_intel_display(crtc_state);
1097 	u32 dpll;
1098 
1099 	dpll = DPLL_VCO_ENABLE | DPLL_VGA_MODE_DIS;
1100 
1101 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1102 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
1103 	} else {
1104 		if (clock->p1 == 2)
1105 			dpll |= PLL_P1_DIVIDE_BY_TWO;
1106 		else
1107 			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
1108 		if (clock->p2 == 4)
1109 			dpll |= PLL_P2_DIVIDE_BY_4;
1110 	}
1111 	WARN_ON(reduced_clock->p1 != clock->p1);
1112 	WARN_ON(reduced_clock->p2 != clock->p2);
1113 
1114 	/*
1115 	 * Bspec:
1116 	 * "[Almador Errata]: For the correct operation of the muxed DVO pins
1117 	 *  (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
1118 	 *  GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
1119 	 *  Enable) must be set to “1” in both the DPLL A Control Register
1120 	 *  (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
1121 	 *
1122 	 * For simplicity, we just keep both bits always enabled in both
1123 	 * DPLLs. The spec says we should disable the DVO 2X clock
1124 	 * when not needed, but this seems to work fine in practice.
1125 	 */
1126 	if (display->platform.i830 ||
1127 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
1128 		dpll |= DPLL_DVO_2X_MODE;
1129 
1130 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
1131 	    intel_panel_use_ssc(display))
1132 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
1133 	else
1134 		dpll |= PLL_REF_INPUT_DREFCLK;
1135 
1136 	return dpll;
1137 }
1138 
1139 static void i8xx_compute_dpll(struct intel_crtc_state *crtc_state,
1140 			      const struct dpll *clock,
1141 			      const struct dpll *reduced_clock)
1142 {
1143 	struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
1144 
1145 	hw_state->fp0 = i9xx_dpll_compute_fp(clock);
1146 	hw_state->fp1 = i9xx_dpll_compute_fp(reduced_clock);
1147 
1148 	hw_state->dpll = i8xx_dpll(crtc_state, clock, reduced_clock);
1149 }
1150 
1151 static int hsw_crtc_compute_clock(struct intel_atomic_state *state,
1152 				  struct intel_crtc *crtc)
1153 {
1154 	struct intel_display *display = to_intel_display(state);
1155 	struct intel_crtc_state *crtc_state =
1156 		intel_atomic_get_new_crtc_state(state, crtc);
1157 	struct intel_encoder *encoder =
1158 		intel_get_crtc_new_encoder(state, crtc_state);
1159 	int ret;
1160 
1161 	if (DISPLAY_VER(display) < 11 &&
1162 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1163 		return 0;
1164 
1165 	ret = intel_compute_shared_dplls(state, crtc, encoder);
1166 	if (ret)
1167 		return ret;
1168 
1169 	/* FIXME this is a mess */
1170 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1171 		return 0;
1172 
1173 	/* CRT dotclock is determined via other means */
1174 	if (!crtc_state->has_pch_encoder)
1175 		crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1176 
1177 	return 0;
1178 }
1179 
1180 static int hsw_crtc_get_shared_dpll(struct intel_atomic_state *state,
1181 				    struct intel_crtc *crtc)
1182 {
1183 	struct intel_display *display = to_intel_display(state);
1184 	struct intel_crtc_state *crtc_state =
1185 		intel_atomic_get_new_crtc_state(state, crtc);
1186 	struct intel_encoder *encoder =
1187 		intel_get_crtc_new_encoder(state, crtc_state);
1188 
1189 	if (DISPLAY_VER(display) < 11 &&
1190 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1191 		return 0;
1192 
1193 	return intel_reserve_shared_dplls(state, crtc, encoder);
1194 }
1195 
1196 static int dg2_crtc_compute_clock(struct intel_atomic_state *state,
1197 				  struct intel_crtc *crtc)
1198 {
1199 	struct intel_crtc_state *crtc_state =
1200 		intel_atomic_get_new_crtc_state(state, crtc);
1201 	struct intel_encoder *encoder =
1202 		intel_get_crtc_new_encoder(state, crtc_state);
1203 	int ret;
1204 
1205 	ret = intel_mpllb_calc_state(crtc_state, encoder);
1206 	if (ret)
1207 		return ret;
1208 
1209 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1210 
1211 	return 0;
1212 }
1213 
1214 static int mtl_crtc_compute_clock(struct intel_atomic_state *state,
1215 				  struct intel_crtc *crtc)
1216 {
1217 	struct intel_crtc_state *crtc_state =
1218 		intel_atomic_get_new_crtc_state(state, crtc);
1219 	struct intel_encoder *encoder =
1220 		intel_get_crtc_new_encoder(state, crtc_state);
1221 	int ret;
1222 
1223 	ret = intel_cx0pll_calc_state(crtc_state, encoder);
1224 	if (ret)
1225 		return ret;
1226 
1227 	/* TODO: Do the readback via intel_compute_shared_dplls() */
1228 	crtc_state->port_clock = intel_cx0pll_calc_port_clock(encoder, &crtc_state->dpll_hw_state.cx0pll);
1229 
1230 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1231 
1232 	return 0;
1233 }
1234 
1235 static int ilk_fb_cb_factor(const struct intel_crtc_state *crtc_state)
1236 {
1237 	struct intel_display *display = to_intel_display(crtc_state);
1238 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1239 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
1240 
1241 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
1242 	    ((intel_panel_use_ssc(display) && display->vbt.lvds_ssc_freq == 100000) ||
1243 	     (HAS_PCH_IBX(i915) && intel_is_dual_link_lvds(display))))
1244 		return 25;
1245 
1246 	if (crtc_state->sdvo_tv_clock)
1247 		return 20;
1248 
1249 	return 21;
1250 }
1251 
1252 static bool ilk_needs_fb_cb_tune(const struct dpll *dpll, int factor)
1253 {
1254 	return dpll->m < factor * dpll->n;
1255 }
1256 
1257 static u32 ilk_dpll_compute_fp(const struct dpll *clock, int factor)
1258 {
1259 	u32 fp;
1260 
1261 	fp = i9xx_dpll_compute_fp(clock);
1262 	if (ilk_needs_fb_cb_tune(clock, factor))
1263 		fp |= FP_CB_TUNE;
1264 
1265 	return fp;
1266 }
1267 
1268 static u32 ilk_dpll(const struct intel_crtc_state *crtc_state,
1269 		    const struct dpll *clock,
1270 		    const struct dpll *reduced_clock)
1271 {
1272 	struct intel_display *display = to_intel_display(crtc_state);
1273 	u32 dpll;
1274 
1275 	dpll = DPLL_VCO_ENABLE;
1276 
1277 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
1278 		dpll |= DPLLB_MODE_LVDS;
1279 	else
1280 		dpll |= DPLLB_MODE_DAC_SERIAL;
1281 
1282 	dpll |= (crtc_state->pixel_multiplier - 1)
1283 		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
1284 
1285 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
1286 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1287 		dpll |= DPLL_SDVO_HIGH_SPEED;
1288 
1289 	if (intel_crtc_has_dp_encoder(crtc_state))
1290 		dpll |= DPLL_SDVO_HIGH_SPEED;
1291 
1292 	/*
1293 	 * The high speed IO clock is only really required for
1294 	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
1295 	 * possible to share the DPLL between CRT and HDMI. Enabling
1296 	 * the clock needlessly does no real harm, except use up a
1297 	 * bit of power potentially.
1298 	 *
1299 	 * We'll limit this to IVB with 3 pipes, since it has only two
1300 	 * DPLLs and so DPLL sharing is the only way to get three pipes
1301 	 * driving PCH ports at the same time. On SNB we could do this,
1302 	 * and potentially avoid enabling the second DPLL, but it's not
1303 	 * clear if it's a win or a loss power-wise. No point in doing
1304 	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
1305 	 */
1306 	if (INTEL_NUM_PIPES(display) == 3 &&
1307 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1308 		dpll |= DPLL_SDVO_HIGH_SPEED;
1309 
1310 	/* compute bitmask from p1 value */
1311 	dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
1312 	/* also FPA1 */
1313 	dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
1314 
1315 	switch (clock->p2) {
1316 	case 5:
1317 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
1318 		break;
1319 	case 7:
1320 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
1321 		break;
1322 	case 10:
1323 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
1324 		break;
1325 	case 14:
1326 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
1327 		break;
1328 	}
1329 	WARN_ON(reduced_clock->p2 != clock->p2);
1330 
1331 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
1332 	    intel_panel_use_ssc(display))
1333 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
1334 	else
1335 		dpll |= PLL_REF_INPUT_DREFCLK;
1336 
1337 	return dpll;
1338 }
1339 
1340 static void ilk_compute_dpll(struct intel_crtc_state *crtc_state,
1341 			     const struct dpll *clock,
1342 			     const struct dpll *reduced_clock)
1343 {
1344 	struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
1345 	int factor = ilk_fb_cb_factor(crtc_state);
1346 
1347 	hw_state->fp0 = ilk_dpll_compute_fp(clock, factor);
1348 	hw_state->fp1 = ilk_dpll_compute_fp(reduced_clock, factor);
1349 
1350 	hw_state->dpll = ilk_dpll(crtc_state, clock, reduced_clock);
1351 }
1352 
1353 static int ilk_crtc_compute_clock(struct intel_atomic_state *state,
1354 				  struct intel_crtc *crtc)
1355 {
1356 	struct intel_display *display = to_intel_display(state);
1357 	struct intel_crtc_state *crtc_state =
1358 		intel_atomic_get_new_crtc_state(state, crtc);
1359 	const struct intel_limit *limit;
1360 	int refclk = 120000;
1361 	int ret;
1362 
1363 	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
1364 	if (!crtc_state->has_pch_encoder)
1365 		return 0;
1366 
1367 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1368 		if (intel_panel_use_ssc(display)) {
1369 			drm_dbg_kms(display->drm,
1370 				    "using SSC reference clock of %d kHz\n",
1371 				    display->vbt.lvds_ssc_freq);
1372 			refclk = display->vbt.lvds_ssc_freq;
1373 		}
1374 
1375 		if (intel_is_dual_link_lvds(display)) {
1376 			if (refclk == 100000)
1377 				limit = &ilk_limits_dual_lvds_100m;
1378 			else
1379 				limit = &ilk_limits_dual_lvds;
1380 		} else {
1381 			if (refclk == 100000)
1382 				limit = &ilk_limits_single_lvds_100m;
1383 			else
1384 				limit = &ilk_limits_single_lvds;
1385 		}
1386 	} else {
1387 		limit = &ilk_limits_dac;
1388 	}
1389 
1390 	if (!crtc_state->clock_set &&
1391 	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1392 				refclk, NULL, &crtc_state->dpll))
1393 		return -EINVAL;
1394 
1395 	i9xx_calc_dpll_params(refclk, &crtc_state->dpll);
1396 
1397 	ilk_compute_dpll(crtc_state, &crtc_state->dpll,
1398 			 &crtc_state->dpll);
1399 
1400 	ret = intel_compute_shared_dplls(state, crtc, NULL);
1401 	if (ret)
1402 		return ret;
1403 
1404 	crtc_state->port_clock = crtc_state->dpll.dot;
1405 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1406 
1407 	return ret;
1408 }
1409 
1410 static int ilk_crtc_get_shared_dpll(struct intel_atomic_state *state,
1411 				    struct intel_crtc *crtc)
1412 {
1413 	struct intel_crtc_state *crtc_state =
1414 		intel_atomic_get_new_crtc_state(state, crtc);
1415 
1416 	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
1417 	if (!crtc_state->has_pch_encoder)
1418 		return 0;
1419 
1420 	return intel_reserve_shared_dplls(state, crtc, NULL);
1421 }
1422 
1423 static u32 vlv_dpll(const struct intel_crtc_state *crtc_state)
1424 {
1425 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1426 	u32 dpll;
1427 
1428 	dpll = DPLL_INTEGRATED_REF_CLK_VLV |
1429 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1430 
1431 	if (crtc->pipe != PIPE_A)
1432 		dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
1433 
1434 	/* DPLL not used with DSI, but still need the rest set up */
1435 	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1436 		dpll |= DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV;
1437 
1438 	return dpll;
1439 }
1440 
1441 void vlv_compute_dpll(struct intel_crtc_state *crtc_state)
1442 {
1443 	struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
1444 
1445 	hw_state->dpll = vlv_dpll(crtc_state);
1446 	hw_state->dpll_md = i965_dpll_md(crtc_state);
1447 }
1448 
1449 static u32 chv_dpll(const struct intel_crtc_state *crtc_state)
1450 {
1451 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1452 	u32 dpll;
1453 
1454 	dpll = DPLL_SSC_REF_CLK_CHV |
1455 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1456 
1457 	if (crtc->pipe != PIPE_A)
1458 		dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
1459 
1460 	/* DPLL not used with DSI, but still need the rest set up */
1461 	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1462 		dpll |= DPLL_VCO_ENABLE;
1463 
1464 	return dpll;
1465 }
1466 
1467 void chv_compute_dpll(struct intel_crtc_state *crtc_state)
1468 {
1469 	struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
1470 
1471 	hw_state->dpll = chv_dpll(crtc_state);
1472 	hw_state->dpll_md = i965_dpll_md(crtc_state);
1473 }
1474 
1475 static int chv_crtc_compute_clock(struct intel_atomic_state *state,
1476 				  struct intel_crtc *crtc)
1477 {
1478 	struct intel_crtc_state *crtc_state =
1479 		intel_atomic_get_new_crtc_state(state, crtc);
1480 	const struct intel_limit *limit = &intel_limits_chv;
1481 	int refclk = 100000;
1482 
1483 	if (!crtc_state->clock_set &&
1484 	    !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1485 				refclk, NULL, &crtc_state->dpll))
1486 		return -EINVAL;
1487 
1488 	chv_calc_dpll_params(refclk, &crtc_state->dpll);
1489 
1490 	chv_compute_dpll(crtc_state);
1491 
1492 	/* FIXME this is a mess */
1493 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1494 		return 0;
1495 
1496 	crtc_state->port_clock = crtc_state->dpll.dot;
1497 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1498 
1499 	return 0;
1500 }
1501 
1502 static int vlv_crtc_compute_clock(struct intel_atomic_state *state,
1503 				  struct intel_crtc *crtc)
1504 {
1505 	struct intel_crtc_state *crtc_state =
1506 		intel_atomic_get_new_crtc_state(state, crtc);
1507 	const struct intel_limit *limit = &intel_limits_vlv;
1508 	int refclk = 100000;
1509 
1510 	if (!crtc_state->clock_set &&
1511 	    !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1512 				refclk, NULL, &crtc_state->dpll))
1513 		return -EINVAL;
1514 
1515 	vlv_calc_dpll_params(refclk, &crtc_state->dpll);
1516 
1517 	vlv_compute_dpll(crtc_state);
1518 
1519 	/* FIXME this is a mess */
1520 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1521 		return 0;
1522 
1523 	crtc_state->port_clock = crtc_state->dpll.dot;
1524 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1525 
1526 	return 0;
1527 }
1528 
1529 static int g4x_crtc_compute_clock(struct intel_atomic_state *state,
1530 				  struct intel_crtc *crtc)
1531 {
1532 	struct intel_display *display = to_intel_display(state);
1533 	struct intel_crtc_state *crtc_state =
1534 		intel_atomic_get_new_crtc_state(state, crtc);
1535 	const struct intel_limit *limit;
1536 	int refclk = 96000;
1537 
1538 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1539 		if (intel_panel_use_ssc(display)) {
1540 			refclk = display->vbt.lvds_ssc_freq;
1541 			drm_dbg_kms(display->drm,
1542 				    "using SSC reference clock of %d kHz\n",
1543 				    refclk);
1544 		}
1545 
1546 		if (intel_is_dual_link_lvds(display))
1547 			limit = &intel_limits_g4x_dual_channel_lvds;
1548 		else
1549 			limit = &intel_limits_g4x_single_channel_lvds;
1550 	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
1551 		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
1552 		limit = &intel_limits_g4x_hdmi;
1553 	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
1554 		limit = &intel_limits_g4x_sdvo;
1555 	} else {
1556 		/* Limits for all other output types */
1557 		limit = &intel_limits_i9xx_sdvo;
1558 	}
1559 
1560 	if (!crtc_state->clock_set &&
1561 	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1562 				refclk, NULL, &crtc_state->dpll))
1563 		return -EINVAL;
1564 
1565 	i9xx_calc_dpll_params(refclk, &crtc_state->dpll);
1566 
1567 	i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
1568 			  &crtc_state->dpll);
1569 
1570 	crtc_state->port_clock = crtc_state->dpll.dot;
1571 	/* FIXME this is a mess */
1572 	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_TVOUT))
1573 		crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1574 
1575 	return 0;
1576 }
1577 
1578 static int pnv_crtc_compute_clock(struct intel_atomic_state *state,
1579 				  struct intel_crtc *crtc)
1580 {
1581 	struct intel_display *display = to_intel_display(state);
1582 	struct intel_crtc_state *crtc_state =
1583 		intel_atomic_get_new_crtc_state(state, crtc);
1584 	const struct intel_limit *limit;
1585 	int refclk = 96000;
1586 
1587 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1588 		if (intel_panel_use_ssc(display)) {
1589 			refclk = display->vbt.lvds_ssc_freq;
1590 			drm_dbg_kms(display->drm,
1591 				    "using SSC reference clock of %d kHz\n",
1592 				    refclk);
1593 		}
1594 
1595 		limit = &pnv_limits_lvds;
1596 	} else {
1597 		limit = &pnv_limits_sdvo;
1598 	}
1599 
1600 	if (!crtc_state->clock_set &&
1601 	    !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1602 				refclk, NULL, &crtc_state->dpll))
1603 		return -EINVAL;
1604 
1605 	pnv_calc_dpll_params(refclk, &crtc_state->dpll);
1606 
1607 	i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
1608 			  &crtc_state->dpll);
1609 
1610 	crtc_state->port_clock = crtc_state->dpll.dot;
1611 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1612 
1613 	return 0;
1614 }
1615 
1616 static int i9xx_crtc_compute_clock(struct intel_atomic_state *state,
1617 				   struct intel_crtc *crtc)
1618 {
1619 	struct intel_display *display = to_intel_display(state);
1620 	struct intel_crtc_state *crtc_state =
1621 		intel_atomic_get_new_crtc_state(state, crtc);
1622 	const struct intel_limit *limit;
1623 	int refclk = 96000;
1624 
1625 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1626 		if (intel_panel_use_ssc(display)) {
1627 			refclk = display->vbt.lvds_ssc_freq;
1628 			drm_dbg_kms(display->drm,
1629 				    "using SSC reference clock of %d kHz\n",
1630 				    refclk);
1631 		}
1632 
1633 		limit = &intel_limits_i9xx_lvds;
1634 	} else {
1635 		limit = &intel_limits_i9xx_sdvo;
1636 	}
1637 
1638 	if (!crtc_state->clock_set &&
1639 	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1640 				 refclk, NULL, &crtc_state->dpll))
1641 		return -EINVAL;
1642 
1643 	i9xx_calc_dpll_params(refclk, &crtc_state->dpll);
1644 
1645 	i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
1646 			  &crtc_state->dpll);
1647 
1648 	crtc_state->port_clock = crtc_state->dpll.dot;
1649 	/* FIXME this is a mess */
1650 	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_TVOUT))
1651 		crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1652 
1653 	return 0;
1654 }
1655 
1656 static int i8xx_crtc_compute_clock(struct intel_atomic_state *state,
1657 				   struct intel_crtc *crtc)
1658 {
1659 	struct intel_display *display = to_intel_display(state);
1660 	struct intel_crtc_state *crtc_state =
1661 		intel_atomic_get_new_crtc_state(state, crtc);
1662 	const struct intel_limit *limit;
1663 	int refclk = 48000;
1664 
1665 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1666 		if (intel_panel_use_ssc(display)) {
1667 			refclk = display->vbt.lvds_ssc_freq;
1668 			drm_dbg_kms(display->drm,
1669 				    "using SSC reference clock of %d kHz\n",
1670 				    refclk);
1671 		}
1672 
1673 		limit = &intel_limits_i8xx_lvds;
1674 	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
1675 		limit = &intel_limits_i8xx_dvo;
1676 	} else {
1677 		limit = &intel_limits_i8xx_dac;
1678 	}
1679 
1680 	if (!crtc_state->clock_set &&
1681 	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1682 				 refclk, NULL, &crtc_state->dpll))
1683 		return -EINVAL;
1684 
1685 	i9xx_calc_dpll_params(refclk, &crtc_state->dpll);
1686 
1687 	i8xx_compute_dpll(crtc_state, &crtc_state->dpll,
1688 			  &crtc_state->dpll);
1689 
1690 	crtc_state->port_clock = crtc_state->dpll.dot;
1691 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1692 
1693 	return 0;
1694 }
1695 
1696 static const struct intel_dpll_funcs mtl_dpll_funcs = {
1697 	.crtc_compute_clock = mtl_crtc_compute_clock,
1698 };
1699 
1700 static const struct intel_dpll_funcs dg2_dpll_funcs = {
1701 	.crtc_compute_clock = dg2_crtc_compute_clock,
1702 };
1703 
1704 static const struct intel_dpll_funcs hsw_dpll_funcs = {
1705 	.crtc_compute_clock = hsw_crtc_compute_clock,
1706 	.crtc_get_shared_dpll = hsw_crtc_get_shared_dpll,
1707 };
1708 
1709 static const struct intel_dpll_funcs ilk_dpll_funcs = {
1710 	.crtc_compute_clock = ilk_crtc_compute_clock,
1711 	.crtc_get_shared_dpll = ilk_crtc_get_shared_dpll,
1712 };
1713 
1714 static const struct intel_dpll_funcs chv_dpll_funcs = {
1715 	.crtc_compute_clock = chv_crtc_compute_clock,
1716 };
1717 
1718 static const struct intel_dpll_funcs vlv_dpll_funcs = {
1719 	.crtc_compute_clock = vlv_crtc_compute_clock,
1720 };
1721 
1722 static const struct intel_dpll_funcs g4x_dpll_funcs = {
1723 	.crtc_compute_clock = g4x_crtc_compute_clock,
1724 };
1725 
1726 static const struct intel_dpll_funcs pnv_dpll_funcs = {
1727 	.crtc_compute_clock = pnv_crtc_compute_clock,
1728 };
1729 
1730 static const struct intel_dpll_funcs i9xx_dpll_funcs = {
1731 	.crtc_compute_clock = i9xx_crtc_compute_clock,
1732 };
1733 
1734 static const struct intel_dpll_funcs i8xx_dpll_funcs = {
1735 	.crtc_compute_clock = i8xx_crtc_compute_clock,
1736 };
1737 
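/*
 * Compute the DPLL state for a CRTC undergoing a full modeset. For a CRTC
 * that is being disabled only the dpll_hw_state is cleared.
 */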
1738 int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
1739 				  struct intel_crtc *crtc)
1740 {
1741 	struct intel_display *display = to_intel_display(state);
1742 	struct intel_crtc_state *crtc_state =
1743 		intel_atomic_get_new_crtc_state(state, crtc);
1744 	int ret;
1745 
1746 	drm_WARN_ON(display->drm, !intel_crtc_needs_modeset(crtc_state));
1747 
1748 	memset(&crtc_state->dpll_hw_state, 0,
1749 	       sizeof(crtc_state->dpll_hw_state));
1750 
1751 	if (!crtc_state->hw.enable)
1752 		return 0;
1753 
1754 	ret = display->funcs.dpll->crtc_compute_clock(state, crtc);
1755 	if (ret) {
1756 		drm_dbg_kms(display->drm, "[CRTC:%d:%s] Couldn't calculate DPLL settings\n",
1757 			    crtc->base.base.id, crtc->base.name);
1758 		return ret;
1759 	}
1760 
1761 	return 0;
1762 }
1763 
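/*
 * Reserve a shared DPLL for a CRTC undergoing a full modeset. This is a
 * no-op if the CRTC is being disabled, already has a shared DPLL assigned,
 * or the platform has no crtc_get_shared_dpll hook.
 */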
1764 int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
1765 				    struct intel_crtc *crtc)
1766 {
1767 	struct intel_display *display = to_intel_display(state);
1768 	struct intel_crtc_state *crtc_state =
1769 		intel_atomic_get_new_crtc_state(state, crtc);
1770 	int ret;
1771 
1772 	drm_WARN_ON(display->drm, !intel_crtc_needs_modeset(crtc_state));
1773 	drm_WARN_ON(display->drm, !crtc_state->hw.enable && crtc_state->shared_dpll);
1774 
1775 	if (!crtc_state->hw.enable || crtc_state->shared_dpll)
1776 		return 0;
1777 
1778 	if (!display->funcs.dpll->crtc_get_shared_dpll)
1779 		return 0;
1780 
1781 	ret = display->funcs.dpll->crtc_get_shared_dpll(state, crtc);
1782 	if (ret) {
1783 		drm_dbg_kms(display->drm, "[CRTC:%d:%s] Couldn't get a shared DPLL\n",
1784 			    crtc->base.base.id, crtc->base.name);
1785 		return ret;
1786 	}
1787 
1788 	return 0;
1789 }
1790 
1791 void
1792 intel_dpll_init_clock_hook(struct intel_display *display)
1793 {
1794 	struct drm_i915_private *dev_priv = to_i915(display->drm);
1795 
1796 	if (DISPLAY_VER(display) >= 14)
1797 		display->funcs.dpll = &mtl_dpll_funcs;
1798 	else if (display->platform.dg2)
1799 		display->funcs.dpll = &dg2_dpll_funcs;
1800 	else if (DISPLAY_VER(display) >= 9 || HAS_DDI(display))
1801 		display->funcs.dpll = &hsw_dpll_funcs;
1802 	else if (HAS_PCH_SPLIT(dev_priv))
1803 		display->funcs.dpll = &ilk_dpll_funcs;
1804 	else if (display->platform.cherryview)
1805 		display->funcs.dpll = &chv_dpll_funcs;
1806 	else if (display->platform.valleyview)
1807 		display->funcs.dpll = &vlv_dpll_funcs;
1808 	else if (display->platform.g4x)
1809 		display->funcs.dpll = &g4x_dpll_funcs;
1810 	else if (display->platform.pineview)
1811 		display->funcs.dpll = &pnv_dpll_funcs;
1812 	else if (DISPLAY_VER(display) != 2)
1813 		display->funcs.dpll = &i9xx_dpll_funcs;
1814 	else
1815 		display->funcs.dpll = &i8xx_dpll_funcs;
1816 }
1817 
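/*
 * Whether the platform has a panel power sequencer that can write protect
 * the DPLL registers; if so, assert_pps_unlocked() must pass before the
 * PLL is programmed (see the "PLL is protected by panel" comments below).
 */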
1818 static bool i9xx_has_pps(struct intel_display *display)
1819 {
1820 	if (display->platform.i830)
1821 		return false;
1822 
1823 	return display->platform.pineview || display->platform.mobile;
1824 }
1825 
1826 void i9xx_enable_pll(const struct intel_crtc_state *crtc_state)
1827 {
1828 	struct intel_display *display = to_intel_display(crtc_state);
1829 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1830 	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
1831 	enum pipe pipe = crtc->pipe;
1832 	int i;
1833 
1834 	assert_transcoder_disabled(display, crtc_state->cpu_transcoder);
1835 
1836 	/* PLL is protected by panel, make sure we can write it */
1837 	if (i9xx_has_pps(display))
1838 		assert_pps_unlocked(display, pipe);
1839 
1840 	intel_de_write(display, FP0(pipe), hw_state->fp0);
1841 	intel_de_write(display, FP1(pipe), hw_state->fp1);
1842 
1843 	/*
1844 	 * Apparently we need to have VGA mode enabled prior to changing
1845 	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
1846 	 * dividers, even though the register value does change.
1847 	 */
1848 	intel_de_write(display, DPLL(display, pipe),
1849 		       hw_state->dpll & ~DPLL_VGA_MODE_DIS);
1850 	intel_de_write(display, DPLL(display, pipe), hw_state->dpll);
1851 
1852 	/* Wait for the clocks to stabilize. */
1853 	intel_de_posting_read(display, DPLL(display, pipe));
1854 	udelay(150);
1855 
1856 	if (DISPLAY_VER(display) >= 4) {
1857 		intel_de_write(display, DPLL_MD(display, pipe),
1858 			       hw_state->dpll_md);
1859 	} else {
1860 		/*
1861 		 * The pixel multiplier can only be updated once the
1862 		 * DPLL is enabled and the clocks are stable.
1863 		 * So write it again.
1864 		 */
1865 		intel_de_write(display, DPLL(display, pipe), hw_state->dpll);
1866 	}
1867 
1868 	/* We do this three times for luck */
1869 	for (i = 0; i < 3; i++) {
1870 		intel_de_write(display, DPLL(display, pipe), hw_state->dpll);
1871 		intel_de_posting_read(display, DPLL(display, pipe));
1872 		udelay(150); /* wait for warmup */
1873 	}
1874 }
1875 
1876 static void vlv_pllb_recal_opamp(struct intel_display *display,
1877 				 enum dpio_phy phy, enum dpio_channel ch)
1878 {
1879 	struct drm_i915_private *dev_priv = to_i915(display->drm);
1880 	u32 tmp;
1881 
1882 	/*
1883 	 * The PLLB opamp always calibrates to the max value of 0x3f, so
1884 	 * force-enable it and set it to a reasonable value instead.
1885 	 */
1886 	tmp = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW17(ch));
1887 	tmp &= 0xffffff00;
1888 	tmp |= 0x00000030;
1889 	vlv_dpio_write(dev_priv, phy, VLV_PLL_DW17(ch), tmp);
1890 
1891 	tmp = vlv_dpio_read(dev_priv, phy, VLV_REF_DW11);
1892 	tmp &= 0x00ffffff;
1893 	tmp |= 0x8c000000;
1894 	vlv_dpio_write(dev_priv, phy, VLV_REF_DW11, tmp);
1895 
1896 	tmp = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW17(ch));
1897 	tmp &= 0xffffff00;
1898 	vlv_dpio_write(dev_priv, phy, VLV_PLL_DW17(ch), tmp);
1899 
1900 	tmp = vlv_dpio_read(dev_priv, phy, VLV_REF_DW11);
1901 	tmp &= 0x00ffffff;
1902 	tmp |= 0xb0000000;
1903 	vlv_dpio_write(dev_priv, phy, VLV_REF_DW11, tmp);
1904 }
1905 
1906 static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state)
1907 {
1908 	struct intel_display *display = to_intel_display(crtc_state);
1909 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1910 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1911 	const struct dpll *clock = &crtc_state->dpll;
1912 	enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
1913 	enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
1914 	enum pipe pipe = crtc->pipe;
1915 	u32 tmp, coreclk;
1916 
1917 	vlv_dpio_get(dev_priv);
1918 
1919 	/* See the eDP/HDMI DPIO driver and VBIOS notes doc */
1920 
1921 	/* PLL B needs special handling */
1922 	if (pipe == PIPE_B)
1923 		vlv_pllb_recal_opamp(display, phy, ch);
1924 
1925 	/* Set up Tx target for periodic Rcomp update */
1926 	vlv_dpio_write(dev_priv, phy, VLV_PCS_DW17_BCAST, 0x0100000f);
1927 
1928 	/* Disable target IRef on PLL */
1929 	tmp = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW16(ch));
1930 	tmp &= 0x00ffffff;
1931 	vlv_dpio_write(dev_priv, phy, VLV_PLL_DW16(ch), tmp);
1932 
1933 	/* Disable fast lock */
1934 	vlv_dpio_write(dev_priv, phy, VLV_CMN_DW0, 0x610);
1935 
1936 	/* Set idtafcrecal before PLL is enabled */
1937 	tmp = DPIO_M1_DIV(clock->m1) |
1938 		DPIO_M2_DIV(clock->m2) |
1939 		DPIO_P1_DIV(clock->p1) |
1940 		DPIO_P2_DIV(clock->p2) |
1941 		DPIO_N_DIV(clock->n) |
1942 		DPIO_K_DIV(1);
1943 
1944 	/*
1945 	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
1946 	 * but we don't support that).
1947 	 * Note: don't use the DAC post divider as it seems unstable.
1948 	 */
1949 	tmp |= DPIO_S1_DIV(DPIO_S1_DIV_HDMIDP);
1950 	vlv_dpio_write(dev_priv, phy, VLV_PLL_DW3(ch), tmp);
1951 
1952 	tmp |= DPIO_ENABLE_CALIBRATION;
1953 	vlv_dpio_write(dev_priv, phy, VLV_PLL_DW3(ch), tmp);
1954 
1955 	/* Set HBR and RBR LPF coefficients */
1956 	if (crtc_state->port_clock == 162000 ||
1957 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG) ||
1958 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1959 		vlv_dpio_write(dev_priv, phy, VLV_PLL_DW18(ch),
1960 				 0x009f0003);
1961 	else
1962 		vlv_dpio_write(dev_priv, phy, VLV_PLL_DW18(ch),
1963 				 0x00d0000f);
1964 
1965 	if (intel_crtc_has_dp_encoder(crtc_state)) {
1966 		/* Use SSC source */
1967 		if (pipe == PIPE_A)
1968 			vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(ch),
1969 					 0x0df40000);
1970 		else
1971 			vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(ch),
1972 					 0x0df70000);
1973 	} else { /* HDMI or VGA */
1974 		/* Use bend source */
1975 		if (pipe == PIPE_A)
1976 			vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(ch),
1977 					 0x0df70000);
1978 		else
1979 			vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(ch),
1980 					 0x0df40000);
1981 	}
1982 
1983 	coreclk = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW7(ch));
1984 	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
1985 	if (intel_crtc_has_dp_encoder(crtc_state))
1986 		coreclk |= 0x01000000;
1987 	vlv_dpio_write(dev_priv, phy, VLV_PLL_DW7(ch), coreclk);
1988 
1989 	vlv_dpio_write(dev_priv, phy, VLV_PLL_DW19(ch), 0x87871000);
1990 
1991 	vlv_dpio_put(dev_priv);
1992 }
1993 
1994 static void _vlv_enable_pll(const struct intel_crtc_state *crtc_state)
1995 {
1996 	struct intel_display *display = to_intel_display(crtc_state);
1997 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1998 	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
1999 	enum pipe pipe = crtc->pipe;
2000 
2001 	intel_de_write(display, DPLL(display, pipe), hw_state->dpll);
2002 	intel_de_posting_read(display, DPLL(display, pipe));
2003 	udelay(150);
2004 
2005 	if (intel_de_wait_for_set(display, DPLL(display, pipe), DPLL_LOCK_VLV, 1))
2006 		drm_err(display->drm, "DPLL %d failed to lock\n", pipe);
2007 }
2008 
2009 void vlv_enable_pll(const struct intel_crtc_state *crtc_state)
2010 {
2011 	struct intel_display *display = to_intel_display(crtc_state);
2012 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2013 	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
2014 	enum pipe pipe = crtc->pipe;
2015 
2016 	assert_transcoder_disabled(display, crtc_state->cpu_transcoder);
2017 
2018 	/* PLL is protected by panel, make sure we can write it */
2019 	assert_pps_unlocked(display, pipe);
2020 
2021 	/* Enable Refclk */
2022 	intel_de_write(display, DPLL(display, pipe),
2023 		       hw_state->dpll & ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
2024 
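	/*
	 * The VCO (and the external buffer) are only enabled when this DPLL
	 * actually drives an output; DSI for instance uses its own PLL, in
	 * which case only the refclk bits written above are needed.
	 */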
2025 	if (hw_state->dpll & DPLL_VCO_ENABLE) {
2026 		vlv_prepare_pll(crtc_state);
2027 		_vlv_enable_pll(crtc_state);
2028 	}
2029 
2030 	intel_de_write(display, DPLL_MD(display, pipe), hw_state->dpll_md);
2031 	intel_de_posting_read(display, DPLL_MD(display, pipe));
2032 }
2033 
2034 static void chv_prepare_pll(const struct intel_crtc_state *crtc_state)
2035 {
2036 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2037 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2038 	const struct dpll *clock = &crtc_state->dpll;
2039 	enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
2040 	enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
2041 	u32 tmp, loopfilter, tribuf_calcntr;
2042 	u32 m2_frac;
2043 
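	/*
	 * The CHV M2 divider has 22 fractional bits: the low 22 bits of
	 * clock->m2 hold the fractional part, the remaining bits the integer
	 * part written via DPIO_CHV_M2_DIV() below.
	 */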
2044 	m2_frac = clock->m2 & 0x3fffff;
2045 
2046 	vlv_dpio_get(dev_priv);
2047 
2048 	/* p1 and p2 divider */
2049 	vlv_dpio_write(dev_priv, phy, CHV_CMN_DW13(ch),
2050 		       DPIO_CHV_S1_DIV(5) |
2051 		       DPIO_CHV_P1_DIV(clock->p1) |
2052 		       DPIO_CHV_P2_DIV(clock->p2) |
2053 		       DPIO_CHV_K_DIV(1));
2054 
2055 	/* Feedback post-divider - m2 */
2056 	vlv_dpio_write(dev_priv, phy, CHV_PLL_DW0(ch),
2057 		       DPIO_CHV_M2_DIV(clock->m2 >> 22));
2058 
2059 	/* Feedback refclk divider - n and m1 */
2060 	vlv_dpio_write(dev_priv, phy, CHV_PLL_DW1(ch),
2061 		       DPIO_CHV_M1_DIV(DPIO_CHV_M1_DIV_BY_2) |
2062 		       DPIO_CHV_N_DIV(1));
2063 
2064 	/* M2 fraction division */
2065 	vlv_dpio_write(dev_priv, phy, CHV_PLL_DW2(ch),
2066 		       DPIO_CHV_M2_FRAC_DIV(m2_frac));
2067 
2068 	/* M2 fraction division enable */
2069 	tmp = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW3(ch));
2070 	tmp &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
2071 	tmp |= DPIO_CHV_FEEDFWD_GAIN(2);
2072 	if (m2_frac)
2073 		tmp |= DPIO_CHV_FRAC_DIV_EN;
2074 	vlv_dpio_write(dev_priv, phy, CHV_PLL_DW3(ch), tmp);
2075 
2076 	/* Program digital lock detect threshold */
2077 	tmp = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW9(ch));
2078 	tmp &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
2079 		      DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
2080 	tmp |= DPIO_CHV_INT_LOCK_THRESHOLD(0x5);
2081 	if (!m2_frac)
2082 		tmp |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
2083 	vlv_dpio_write(dev_priv, phy, CHV_PLL_DW9(ch), tmp);
2084 
2085 	/* Loop filter */
2086 	if (clock->vco == 5400000) {
2087 		loopfilter = DPIO_CHV_PROP_COEFF(0x3) |
2088 			DPIO_CHV_INT_COEFF(0x8) |
2089 			DPIO_CHV_GAIN_CTRL(0x1);
2090 		tribuf_calcntr = 0x9;
2091 	} else if (clock->vco <= 6200000) {
2092 		loopfilter = DPIO_CHV_PROP_COEFF(0x5) |
2093 			DPIO_CHV_INT_COEFF(0xB) |
2094 			DPIO_CHV_GAIN_CTRL(0x3);
2095 		tribuf_calcntr = 0x9;
2096 	} else if (clock->vco <= 6480000) {
2097 		loopfilter = DPIO_CHV_PROP_COEFF(0x4) |
2098 			DPIO_CHV_INT_COEFF(0x9) |
2099 			DPIO_CHV_GAIN_CTRL(0x3);
2100 		tribuf_calcntr = 0x8;
2101 	} else {
2102 		/* Not supported. Apply the same limits as in the max case */
2103 		loopfilter = DPIO_CHV_PROP_COEFF(0x4) |
2104 			DPIO_CHV_INT_COEFF(0x9) |
2105 			DPIO_CHV_GAIN_CTRL(0x3);
2106 		tribuf_calcntr = 0;
2107 	}
2108 	vlv_dpio_write(dev_priv, phy, CHV_PLL_DW6(ch), loopfilter);
2109 
2110 	tmp = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW8(ch));
2111 	tmp &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
2112 	tmp |= DPIO_CHV_TDC_TARGET_CNT(tribuf_calcntr);
2113 	vlv_dpio_write(dev_priv, phy, CHV_PLL_DW8(ch), tmp);
2114 
2115 	/* AFC Recal */
2116 	vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(ch),
2117 		       vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(ch)) |
2118 		       DPIO_AFC_RECAL);
2119 
2120 	vlv_dpio_put(dev_priv);
2121 }
2122 
2123 static void _chv_enable_pll(const struct intel_crtc_state *crtc_state)
2124 {
2125 	struct intel_display *display = to_intel_display(crtc_state);
2126 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2127 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2128 	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
2129 	enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
2130 	enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
2131 	enum pipe pipe = crtc->pipe;
2132 	u32 tmp;
2133 
2134 	vlv_dpio_get(dev_priv);
2135 
2136 	/* Re-enable the 10 bit clock to the display controller */
2137 	tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(ch));
2138 	tmp |= DPIO_DCLKP_EN;
2139 	vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(ch), tmp);
2140 
2141 	vlv_dpio_put(dev_priv);
2142 
2143 	/*
2144 	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
2145 	 */
2146 	udelay(1);
2147 
2148 	/* Enable PLL */
2149 	intel_de_write(display, DPLL(display, pipe), hw_state->dpll);
2150 
2151 	/* Check PLL is locked */
2152 	if (intel_de_wait_for_set(display, DPLL(display, pipe), DPLL_LOCK_VLV, 1))
2153 		drm_err(display->drm, "PLL %d failed to lock\n", pipe);
2154 }
2155 
2156 void chv_enable_pll(const struct intel_crtc_state *crtc_state)
2157 {
2158 	struct intel_display *display = to_intel_display(crtc_state);
2159 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2160 	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
2161 	enum pipe pipe = crtc->pipe;
2162 
2163 	assert_transcoder_disabled(display, crtc_state->cpu_transcoder);
2164 
2165 	/* PLL is protected by panel, make sure we can write it */
2166 	assert_pps_unlocked(display, pipe);
2167 
2168 	/* Enable Refclk and SSC */
2169 	intel_de_write(display, DPLL(display, pipe),
2170 		       hw_state->dpll & ~DPLL_VCO_ENABLE);
2171 
2172 	if (hw_state->dpll & DPLL_VCO_ENABLE) {
2173 		chv_prepare_pll(crtc_state);
2174 		_chv_enable_pll(crtc_state);
2175 	}
2176 
2177 	if (pipe != PIPE_A) {
2178 		/*
2179 		 * WaPixelRepeatModeFixForC0:chv
2180 		 *
2181 		 * DPLLCMD is AWOL. Use chicken bits to propagate
2182 		 * the value from DPLLBMD to either pipe B or C.
2183 		 */
2184 		intel_de_write(display, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
2185 		intel_de_write(display, DPLL_MD(display, PIPE_B),
2186 			       hw_state->dpll_md);
2187 		intel_de_write(display, CBR4_VLV, 0);
2188 		display->state.chv_dpll_md[pipe] = hw_state->dpll_md;
2189 
2190 		/*
2191 		 * DPLLB VGA mode also seems to cause problems.
2192 		 * We should always have it disabled.
2193 		 */
2194 		drm_WARN_ON(display->drm,
2195 			    (intel_de_read(display, DPLL(display, PIPE_B)) &
2196 			     DPLL_VGA_MODE_DIS) == 0);
2197 	} else {
2198 		intel_de_write(display, DPLL_MD(display, pipe),
2199 			       hw_state->dpll_md);
2200 		intel_de_posting_read(display, DPLL_MD(display, pipe));
2201 	}
2202 }
2203 
2204 /**
2205  * vlv_force_pll_on - forcibly enable just the PLL
2206  * @display: display device
2207  * @pipe: pipe PLL to enable
2208  * @dpll: PLL configuration
2209  *
2210  * Enable the PLL for @pipe using the supplied @dpll config. To be used
2211  * in cases where we need the PLL enabled even when @pipe is not going to
2212  * be enabled.
2213  */
2214 int vlv_force_pll_on(struct intel_display *display, enum pipe pipe,
2215 		     const struct dpll *dpll)
2216 {
2217 	struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
2218 	struct intel_crtc_state *crtc_state;
2219 
2220 	crtc_state = intel_crtc_state_alloc(crtc);
2221 	if (!crtc_state)
2222 		return -ENOMEM;
2223 
2224 	crtc_state->cpu_transcoder = (enum transcoder)pipe;
2225 	crtc_state->pixel_multiplier = 1;
2226 	crtc_state->dpll = *dpll;
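	/*
	 * Claim an eDP output so that the DP specific parts of the VLV/CHV
	 * PLL programming are used for this temporary state.
	 */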
2227 	crtc_state->output_types = BIT(INTEL_OUTPUT_EDP);
2228 
2229 	if (display->platform.cherryview) {
2230 		chv_compute_dpll(crtc_state);
2231 		chv_enable_pll(crtc_state);
2232 	} else {
2233 		vlv_compute_dpll(crtc_state);
2234 		vlv_enable_pll(crtc_state);
2235 	}
2236 
2237 	intel_crtc_destroy_state(&crtc->base, &crtc_state->uapi);
2238 
2239 	return 0;
2240 }
2241 
2242 void vlv_disable_pll(struct intel_display *display, enum pipe pipe)
2243 {
2244 	u32 val;
2245 
2246 	/* Make sure the pipe isn't still relying on us */
2247 	assert_transcoder_disabled(display, (enum transcoder)pipe);
2248 
2249 	val = DPLL_INTEGRATED_REF_CLK_VLV |
2250 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
2251 	if (pipe != PIPE_A)
2252 		val |= DPLL_INTEGRATED_CRI_CLK_VLV;
2253 
2254 	intel_de_write(display, DPLL(display, pipe), val);
2255 	intel_de_posting_read(display, DPLL(display, pipe));
2256 }
2257 
2258 void chv_disable_pll(struct intel_display *display, enum pipe pipe)
2259 {
2260 	struct drm_i915_private *dev_priv = to_i915(display->drm);
2261 	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
2262 	enum dpio_phy phy = vlv_pipe_to_phy(pipe);
2263 	u32 val;
2264 
2265 	/* Make sure the pipe isn't still relying on us */
2266 	assert_transcoder_disabled(display, (enum transcoder)pipe);
2267 
2268 	val = DPLL_SSC_REF_CLK_CHV |
2269 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
2270 	if (pipe != PIPE_A)
2271 		val |= DPLL_INTEGRATED_CRI_CLK_VLV;
2272 
2273 	intel_de_write(display, DPLL(display, pipe), val);
2274 	intel_de_posting_read(display, DPLL(display, pipe));
2275 
2276 	vlv_dpio_get(dev_priv);
2277 
2278 	/* Disable 10bit clock to display controller */
2279 	val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(ch));
2280 	val &= ~DPIO_DCLKP_EN;
2281 	vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(ch), val);
2282 
2283 	vlv_dpio_put(dev_priv);
2284 }
2285 
2286 void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
2287 {
2288 	struct intel_display *display = to_intel_display(crtc_state);
2289 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2290 	enum pipe pipe = crtc->pipe;
2291 
2292 	/* i830 needs its pipes and PLLs kept running, don't disable them */
2293 	if (display->platform.i830)
2294 		return;
2295 
2296 	/* Make sure the pipe isn't still relying on us */
2297 	assert_transcoder_disabled(display, crtc_state->cpu_transcoder);
2298 
2299 	intel_de_write(display, DPLL(display, pipe), DPLL_VGA_MODE_DIS);
2300 	intel_de_posting_read(display, DPLL(display, pipe));
2301 }
2302 
2304 /**
2305  * vlv_force_pll_off - forcibly disable just the PLL
2306  * @display: display device
2307  * @pipe: pipe PLL to disable
2308  *
2309  * Disable the PLL for @pipe. To be used in cases where the PLL was
2310  * previously force-enabled even though @pipe was not going to be enabled.
2311  */
2312 void vlv_force_pll_off(struct intel_display *display, enum pipe pipe)
2313 {
2314 	if (display->platform.cherryview)
2315 		chv_disable_pll(display, pipe);
2316 	else
2317 		vlv_disable_pll(display, pipe);
2318 }
2319 
2320 /* Only for pre-ILK configs */
2321 static void assert_pll(struct intel_display *display,
2322 		       enum pipe pipe, bool state)
2323 {
2324 	bool cur_state;
2325 
2326 	cur_state = intel_de_read(display, DPLL(display, pipe)) & DPLL_VCO_ENABLE;
2327 	INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
2328 				 "PLL state assertion failure (expected %s, current %s)\n",
2329 				 str_on_off(state), str_on_off(cur_state));
2330 }
2331 
2332 void assert_pll_enabled(struct intel_display *display, enum pipe pipe)
2333 {
2334 	assert_pll(display, pipe, true);
2335 }
2336 
2337 void assert_pll_disabled(struct intel_display *display, enum pipe pipe)
2338 {
2339 	assert_pll(display, pipe, false);
2340 }
2341