xref: /linux/drivers/gpu/drm/i915/display/intel_dpll.c (revision aec2f682d47c54ef434b2d440992626d80b1ebdc)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2020 Intel Corporation
4  */
5 
6 #include <linux/kernel.h>
7 #include <linux/string_helpers.h>
8 
9 #include <drm/drm_print.h>
10 
11 #include "intel_atomic.h"
12 #include "intel_crtc.h"
13 #include "intel_cx0_phy.h"
14 #include "intel_de.h"
15 #include "intel_display.h"
16 #include "intel_display_regs.h"
17 #include "intel_display_types.h"
18 #include "intel_dpio_phy.h"
19 #include "intel_dpll.h"
20 #include "intel_lt_phy.h"
21 #include "intel_lvds.h"
22 #include "intel_lvds_regs.h"
23 #include "intel_panel.h"
24 #include "intel_pps.h"
25 #include "intel_snps_phy.h"
26 #include "vlv_dpio_phy_regs.h"
27 #include "vlv_sideband.h"
28 
/* Per-platform hooks for CRTC clock/DPLL computation (presumably selected at init — see platform code). */
struct intel_dpll_global_funcs {
	/* Compute the PLL/clock state for @crtc as part of @state. */
	int (*crtc_compute_clock)(struct intel_atomic_state *state,
				  struct intel_crtc *crtc);
	/* Acquire the DPLL for @crtc as part of @state — semantics per platform implementation. */
	int (*crtc_get_dpll)(struct intel_atomic_state *state,
			     struct intel_crtc *crtc);
};
35 
/*
 * Divider/clock limits used by the DPLL search routines below.
 * dot/vco are clock ranges in kHz, the rest bound individual dividers.
 */
struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	/*
	 * p2 post divider: p2_slow below dot_limit, p2_fast above it
	 * (LVDS instead selects on single vs. dual channel — see
	 * i9xx_select_p2_div()).
	 */
	struct {
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};
/* i8xx limits, DAC output */
static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

/* i8xx limits, DVO output (same as DAC except p2_fast) */
static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

/* i8xx limits, LVDS output */
static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

/* i9xx limits, SDVO output */
static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* i9xx limits, LVDS output */
static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};


/* g4x limits, SDVO output */
static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

/* g4x limits, HDMI/DVI output */
static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* g4x limits, single channel LVDS (p2 fixed at 14) */
static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

/* g4x limits, dual channel LVDS (p2 fixed at 7) */
static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};

/* Pineview limits, SDVO output */
static const struct intel_limit pnv_limits_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* Pineview limits, LVDS output */
static const struct intel_limit pnv_limits_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};
195 
196 /* Ironlake / Sandybridge
197  *
198  * We calculate clock using (register_value + 2) for N/M1/M2, so here
199  * the range value for them is (actual_value - 2).
200  */
/* ILK/SNB limits, DAC output */
static const struct intel_limit ilk_limits_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* ILK/SNB limits, single channel LVDS */
static const struct intel_limit ilk_limits_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* ILK/SNB limits, dual channel LVDS */
static const struct intel_limit ilk_limits_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100mhz refclk limits. */
static const struct intel_limit ilk_limits_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Dual channel LVDS with a 100MHz refclk */
static const struct intel_limit ilk_limits_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};
266 
static const struct intel_limit intel_limits_vlv = {
	 /*
	  * These are based on the data rate limits (measured in fast clocks)
	  * since those are the strictest limits we have. The fast
	  * clock and actual rate limits are more relaxed, so checking
	  * them would make no difference.
	  */
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const struct intel_limit intel_limits_chv = {
	/*
	 * These are based on the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have.  The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000, .max = 540000 },
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* m2 stored in .22 fixed point format (see chv_calc_dpll_params()) */
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = {	.p2_slow = 1, .p2_fast = 14 },
};

/* BXT/GLK limits, used via the CHV search path (see bxt_find_best_dpll()) */
static const struct intel_limit intel_limits_bxt = {
	.dot = { .min = 25000, .max = 594000 },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};
309 
310 /*
311  * Platform specific helpers to calculate the port PLL loopback- (clock.m),
312  * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
313  * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
314  * The helpers' return value is the rate of the clock that is fed to the
315  * display engine's pipe which can be the above fast dot clock rate or a
316  * divided-down version of it.
317  */
318 /* m1 is reserved as 0 in Pineview, n is a ring counter */
319 static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
320 {
321 	clock->m = clock->m2 + 2;
322 	clock->p = clock->p1 * clock->p2;
323 
324 	clock->vco = clock->n == 0 ? 0 :
325 		DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
326 	clock->dot = clock->p == 0 ? 0 :
327 		DIV_ROUND_CLOSEST(clock->vco, clock->p);
328 
329 	return clock->dot;
330 }
331 
332 static u32 i9xx_dpll_compute_m(const struct dpll *dpll)
333 {
334 	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
335 }
336 
337 int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
338 {
339 	clock->m = i9xx_dpll_compute_m(clock);
340 	clock->p = clock->p1 * clock->p2;
341 
342 	clock->vco = clock->n + 2 == 0 ? 0 :
343 		DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
344 	clock->dot = clock->p == 0 ? 0 :
345 		DIV_ROUND_CLOSEST(clock->vco, clock->p);
346 
347 	return clock->dot;
348 }
349 
350 static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
351 {
352 	clock->m = clock->m1 * clock->m2;
353 	clock->p = clock->p1 * clock->p2 * 5;
354 
355 	clock->vco = clock->n == 0 ? 0 :
356 		DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
357 	clock->dot = clock->p == 0 ? 0 :
358 		DIV_ROUND_CLOSEST(clock->vco, clock->p);
359 
360 	return clock->dot;
361 }
362 
/*
 * CHV: like VLV, but m2 (and hence m) is in .22 fixed point, so the VCO
 * calculation is done in 64 bits with the divisor scaled up by 1 << 22.
 */
int chv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2 * 5;

	clock->vco = clock->n == 0 ? 0 :
		DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m), clock->n << 22);
	clock->dot = clock->p == 0 ? 0 :
		DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}
375 
376 static int i9xx_pll_refclk(const struct intel_crtc_state *crtc_state)
377 {
378 	struct intel_display *display = to_intel_display(crtc_state);
379 	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
380 
381 	if ((hw_state->dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
382 		return display->vbt.lvds_ssc_freq;
383 	else if (HAS_PCH_SPLIT(display))
384 		return 120000;
385 	else if (DISPLAY_VER(display) != 2)
386 		return 96000;
387 	else
388 		return 48000;
389 }
390 
/* Read the current DPLL register state for @crtc into @dpll_hw_state. */
void i9xx_dpll_get_hw_state(struct intel_crtc *crtc,
			    struct intel_dpll_hw_state *dpll_hw_state)
{
	struct intel_display *display = to_intel_display(crtc);
	struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;

	/* DPLL_MD only exists on gen4+ */
	if (DISPLAY_VER(display) >= 4) {
		u32 tmp;

		/* No way to read it out on pipes B and C */
		if (display->platform.cherryview && crtc->pipe != PIPE_A)
			tmp = display->state.chv_dpll_md[crtc->pipe];
		else
			tmp = intel_de_read(display,
					    DPLL_MD(display, crtc->pipe));

		hw_state->dpll_md = tmp;
	}

	hw_state->dpll = intel_de_read(display, DPLL(display, crtc->pipe));

	/* VLV/CHV have no FP registers; everyone else does. */
	if (!display->platform.valleyview && !display->platform.cherryview) {
		hw_state->fp0 = intel_de_read(display, FP0(crtc->pipe));
		hw_state->fp1 = intel_de_read(display, FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		hw_state->dpll &= ~(DPLL_LOCK_VLV |
				    DPLL_PORTC_READY_MASK |
				    DPLL_PORTB_READY_MASK);
	}
}
422 
/* Returns the clock of the currently programmed mode of the given pipe. */
void i9xx_crtc_clock_get(struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
	u32 dpll = hw_state->dpll;
	u32 fp;
	struct dpll clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(crtc_state);

	/* Select which of the two FP register sets is in use. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = hw_state->fp0;
	else
		fp = hw_state->fp1;

	/* Decode the dividers; Pineview has its own n/m2 field layout. */
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (display->platform.pineview) {
		/* n is stored one-hot encoded (ring counter) */
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (DISPLAY_VER(display) != 2) {
		/* gen3+: p1 is stored one-hot encoded in the DPLL register */
		if (display->platform.pineview)
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		/* p2 depends on the DPLL mode (DAC/serial vs. LVDS). */
		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			drm_dbg_kms(display->drm,
				    "Unknown DPLL mode %08x in programmed "
				    "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (display->platform.pineview)
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		/* gen2: p1/p2 encoding differs, and i85x LVDS is special. */
		enum pipe lvds_pipe;

		if (display->platform.i85x &&
		    intel_lvds_port_enabled(display, LVDS, &lvds_pipe) &&
		    lvds_pipe == crtc->pipe) {
			u32 lvds = intel_de_read(display, LVDS);

			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			/* p2 follows the LVDS dual channel (CLKB) state */
			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	crtc_state->port_clock = port_clock;
}
515 
/* Read the VLV PLL dividers via DPIO and compute the resulting port clock. */
void vlv_crtc_clock_get(struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
	enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
	int refclk = 100000;	/* kHz */
	struct dpll clock;
	u32 tmp;

	/* In case of DSI, DPLL will not be used */
	if ((hw_state->dpll & DPLL_VCO_ENABLE) == 0)
		return;

	vlv_dpio_get(display->drm);
	tmp = vlv_dpio_read(display->drm, phy, VLV_PLL_DW3(ch));
	vlv_dpio_put(display->drm);

	/* All dividers live in the single PLL_DW3 register. */
	clock.m1 = REG_FIELD_GET(DPIO_M1_DIV_MASK, tmp);
	clock.m2 = REG_FIELD_GET(DPIO_M2_DIV_MASK, tmp);
	clock.n = REG_FIELD_GET(DPIO_N_DIV_MASK, tmp);
	clock.p1 = REG_FIELD_GET(DPIO_P1_DIV_MASK, tmp);
	clock.p2 = REG_FIELD_GET(DPIO_P2_DIV_MASK, tmp);

	crtc_state->port_clock = vlv_calc_dpll_params(refclk, &clock);
}
543 
/* Read the CHV PLL dividers via DPIO and compute the resulting port clock. */
void chv_crtc_clock_get(struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
	enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
	struct dpll clock;
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
	int refclk = 100000;	/* kHz */

	/* In case of DSI, DPLL will not be used */
	if ((hw_state->dpll & DPLL_VCO_ENABLE) == 0)
		return;

	vlv_dpio_get(display->drm);
	cmn_dw13 = vlv_dpio_read(display->drm, phy, CHV_CMN_DW13(ch));
	pll_dw0 = vlv_dpio_read(display->drm, phy, CHV_PLL_DW0(ch));
	pll_dw1 = vlv_dpio_read(display->drm, phy, CHV_PLL_DW1(ch));
	pll_dw2 = vlv_dpio_read(display->drm, phy, CHV_PLL_DW2(ch));
	pll_dw3 = vlv_dpio_read(display->drm, phy, CHV_PLL_DW3(ch));
	vlv_dpio_put(display->drm);

	clock.m1 = REG_FIELD_GET(DPIO_CHV_M1_DIV_MASK, pll_dw1) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
	/* m2 is .22 fixed point: integer part from DW0, fraction from DW2 */
	clock.m2 = REG_FIELD_GET(DPIO_CHV_M2_DIV_MASK, pll_dw0) << 22;
	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
		clock.m2 |= REG_FIELD_GET(DPIO_CHV_M2_FRAC_DIV_MASK, pll_dw2);
	clock.n = REG_FIELD_GET(DPIO_CHV_N_DIV_MASK, pll_dw1);
	clock.p1 = REG_FIELD_GET(DPIO_CHV_P1_DIV_MASK, cmn_dw13);
	clock.p2 = REG_FIELD_GET(DPIO_CHV_P2_DIV_MASK, cmn_dw13);

	crtc_state->port_clock = chv_calc_dpll_params(refclk, &clock);
}
577 
578 /*
579  * Returns whether the given set of divisors are valid for a given refclk with
580  * the given connectors.
581  */
582 static bool intel_pll_is_valid(struct intel_display *display,
583 			       const struct intel_limit *limit,
584 			       const struct dpll *clock)
585 {
586 	if (clock->n < limit->n.min || limit->n.max < clock->n)
587 		return false;
588 	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
589 		return false;
590 	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
591 		return false;
592 	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
593 		return false;
594 
595 	if (!display->platform.pineview &&
596 	    !display->platform.valleyview && !display->platform.cherryview &&
597 	    !display->platform.broxton && !display->platform.geminilake)
598 		if (clock->m1 <= clock->m2)
599 			return false;
600 
601 	if (!display->platform.valleyview && !display->platform.cherryview &&
602 	    !display->platform.broxton && !display->platform.geminilake) {
603 		if (clock->p < limit->p.min || limit->p.max < clock->p)
604 			return false;
605 		if (clock->m < limit->m.min || limit->m.max < clock->m)
606 			return false;
607 	}
608 
609 	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
610 		return false;
611 	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
612 	 * connector, etc., rather than just a single range.
613 	 */
614 	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
615 		return false;
616 
617 	return true;
618 }
619 
620 static int
621 i9xx_select_p2_div(const struct intel_limit *limit,
622 		   const struct intel_crtc_state *crtc_state,
623 		   int target)
624 {
625 	struct intel_display *display = to_intel_display(crtc_state);
626 
627 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
628 		/*
629 		 * For LVDS just rely on its current settings for dual-channel.
630 		 * We haven't figured out how to reliably set up different
631 		 * single/dual channel state, if we even can.
632 		 */
633 		if (intel_is_dual_link_lvds(display))
634 			return limit->p2.p2_fast;
635 		else
636 			return limit->p2.p2_slow;
637 	} else {
638 		if (target < limit->p2.dot_limit)
639 			return limit->p2.p2_slow;
640 		else
641 			return limit->p2.p2_fast;
642 	}
643 }
644 
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 *
 * Brute-force search over the divider space, keeping the candidate with
 * the smallest absolute dot clock error.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk,
		    const struct dpll *match_clock,
		    struct dpll *best_clock)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct dpll clock;
	int err = target;	/* worst acceptable error; shrinks as we find candidates */

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* hw constraint (also enforced by intel_pll_is_valid()): m1 > m2 */
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_pll_is_valid(display,
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* true iff at least one candidate improved on the initial error */
	return (err != target);
}
702 
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 *
 * Same brute-force search as i9xx_find_best_dpll(), but without the
 * m1 > m2 constraint (m1 is unused on Pineview) and using the Pineview
 * divider formula.
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk,
		   const struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct dpll clock;
	int err = target;	/* worst acceptable error; shrinks as we find candidates */

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_pll_is_valid(display,
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* true iff at least one candidate improved on the initial error */
	return (err != target);
}
758 
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 * (NOTE(review): match_clock is accepted but never checked here — looks
 * intentional for g4x, but worth confirming against the callers.)
 */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk,
		   const struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct dpll clock;
	int max_n;
	bool found = false;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_pll_is_valid(display,
								limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						/* never accept a larger n than the best candidate's */
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}
817 
818 /*
819  * Check if the calculated PLL configuration is more optimal compared to the
820  * best configuration and error found so far. Return the calculated error.
821  */
822 static bool vlv_PLL_is_optimal(struct intel_display *display, int target_freq,
823 			       const struct dpll *calculated_clock,
824 			       const struct dpll *best_clock,
825 			       unsigned int best_error_ppm,
826 			       unsigned int *error_ppm)
827 {
828 	/*
829 	 * For CHV ignore the error and consider only the P value.
830 	 * Prefer a bigger P value based on HW requirements.
831 	 */
832 	if (display->platform.cherryview) {
833 		*error_ppm = 0;
834 
835 		return calculated_clock->p > best_clock->p;
836 	}
837 
838 	if (drm_WARN_ON_ONCE(display->drm, !target_freq))
839 		return false;
840 
841 	*error_ppm = div_u64(1000000ULL *
842 				abs(target_freq - calculated_clock->dot),
843 			     target_freq);
844 	/*
845 	 * Prefer a better P value over a better (smaller) error if the error
846 	 * is small. Ensure this preference for future configurations too by
847 	 * setting the error to 0.
848 	 */
849 	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
850 		*error_ppm = 0;
851 
852 		return true;
853 	}
854 
855 	return *error_ppm + 10 < best_error_ppm;
856 }
857 
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.
 *
 * Unlike the gen2-gen4 searches, m2 is computed directly from the target
 * instead of being iterated, and candidates are compared in ppm error
 * via vlv_PLL_is_optimal().
 */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk,
		   const struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct dpll clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2 * 5;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

					/* solve m2 so that vco/p lands on target */
					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_pll_is_valid(display,
								limit,
								&clock))
						continue;

					if (!vlv_PLL_is_optimal(display, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}
914 
915 /*
916  * Returns a set of divisors for the desired target clock with the given
917  * refclk, or FALSE.
918  */
919 static bool
920 chv_find_best_dpll(const struct intel_limit *limit,
921 		   struct intel_crtc_state *crtc_state,
922 		   int target, int refclk,
923 		   const struct dpll *match_clock,
924 		   struct dpll *best_clock)
925 {
926 	struct intel_display *display = to_intel_display(crtc_state);
927 	unsigned int best_error_ppm;
928 	struct dpll clock;
929 	u64 m2;
930 	int found = false;
931 
932 	memset(best_clock, 0, sizeof(*best_clock));
933 	best_error_ppm = 1000000;
934 
935 	/*
936 	 * Based on hardware doc, the n always set to 1, and m1 always
937 	 * set to 2.  If requires to support 200Mhz refclk, we need to
938 	 * revisit this because n may not 1 anymore.
939 	 */
940 	clock.n = 1;
941 	clock.m1 = 2;
942 
943 	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
944 		for (clock.p2 = limit->p2.p2_fast;
945 				clock.p2 >= limit->p2.p2_slow;
946 				clock.p2 -= clock.p2 > 10 ? 2 : 1) {
947 			unsigned int error_ppm;
948 
949 			clock.p = clock.p1 * clock.p2 * 5;
950 
951 			m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
952 						   refclk * clock.m1);
953 
954 			if (m2 > INT_MAX/clock.m1)
955 				continue;
956 
957 			clock.m2 = m2;
958 
959 			chv_calc_dpll_params(refclk, &clock);
960 
961 			if (!intel_pll_is_valid(display, limit, &clock))
962 				continue;
963 
964 			if (!vlv_PLL_is_optimal(display, target, &clock, best_clock,
965 						best_error_ppm, &error_ppm))
966 				continue;
967 
968 			*best_clock = clock;
969 			best_error_ppm = error_ppm;
970 			found = true;
971 		}
972 	}
973 
974 	return found;
975 }
976 
977 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
978 			struct dpll *best_clock)
979 {
980 	const struct intel_limit *limit = &intel_limits_bxt;
981 	int refclk = 100000;
982 
983 	return chv_find_best_dpll(limit, crtc_state,
984 				  crtc_state->port_clock, refclk,
985 				  NULL, best_clock);
986 }
987 
988 u32 i9xx_dpll_compute_fp(const struct dpll *dpll)
989 {
990 	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
991 }
992 
993 static u32 pnv_dpll_compute_fp(const struct dpll *dpll)
994 {
995 	return (1 << dpll->n) << 16 | dpll->m2;
996 }
997 
998 static u32 i965_dpll_md(const struct intel_crtc_state *crtc_state)
999 {
1000 	return (crtc_state->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
1001 }
1002 
/*
 * Build the DPLL control register value for gen3/4 style PLLs.
 * @reduced_clock must match @clock except on g4x, which has separate
 * FPA1 P1 dividers for the reduced (downclocked) mode.
 */
static u32 i9xx_dpll(const struct intel_crtc_state *crtc_state,
		     const struct dpll *clock,
		     const struct dpll *reduced_clock)
{
	struct intel_display *display = to_intel_display(crtc_state);
	u32 dpll;

	dpll = DPLL_VCO_ENABLE | DPLL_VGA_MODE_DIS;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	if (display->platform.i945g || display->platform.i945gm ||
	    display->platform.g33 || display->platform.pineview) {
		/* pixel multiplier is programmed as (value - 1) */
		dpll |= (crtc_state->pixel_multiplier - 1)
			<< SDVO_MULTIPLIER_SHIFT_HIRES;
	}

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	if (display->platform.g4x) {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
	} else if (display->platform.pineview) {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
		/* only g4x supports a distinct reduced p1 */
		WARN_ON(reduced_clock->p1 != clock->p1);
	} else {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		/* only g4x supports a distinct reduced p1 */
		WARN_ON(reduced_clock->p1 != clock->p1);
	}

	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}
	/* no platform handled here has a distinct reduced p2 */
	WARN_ON(reduced_clock->p2 != clock->p2);

	if (DISPLAY_VER(display) >= 4)
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);

	/* reference clock selection: TV clock, spread spectrum, or plain DREFCLK */
	if (crtc_state->sdvo_tv_clock)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(display))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	return dpll;
}
1071 
1072 static void i9xx_compute_dpll(struct intel_crtc_state *crtc_state,
1073 			      const struct dpll *clock,
1074 			      const struct dpll *reduced_clock)
1075 {
1076 	struct intel_display *display = to_intel_display(crtc_state);
1077 	struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
1078 
1079 	if (display->platform.pineview) {
1080 		hw_state->fp0 = pnv_dpll_compute_fp(clock);
1081 		hw_state->fp1 = pnv_dpll_compute_fp(reduced_clock);
1082 	} else {
1083 		hw_state->fp0 = i9xx_dpll_compute_fp(clock);
1084 		hw_state->fp1 = i9xx_dpll_compute_fp(reduced_clock);
1085 	}
1086 
1087 	hw_state->dpll = i9xx_dpll(crtc_state, clock, reduced_clock);
1088 
1089 	if (DISPLAY_VER(display) >= 4)
1090 		hw_state->dpll_md = i965_dpll_md(crtc_state);
1091 }
1092 
/*
 * Build the DPLL control register value for gen2 PLLs.
 * @reduced_clock must match @clock; gen2 has no separate reduced dividers.
 */
static u32 i8xx_dpll(const struct intel_crtc_state *crtc_state,
		     const struct dpll *clock,
		     const struct dpll *reduced_clock)
{
	struct intel_display *display = to_intel_display(crtc_state);
	u32 dpll;

	dpll = DPLL_VCO_ENABLE | DPLL_VGA_MODE_DIS;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		/* LVDS: p1 is programmed as a one-hot bitmask */
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	} else {
		if (clock->p1 == 2)
			dpll |= PLL_P1_DIVIDE_BY_TWO;
		else
			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (clock->p2 == 4)
			dpll |= PLL_P2_DIVIDE_BY_4;
	}
	WARN_ON(reduced_clock->p1 != clock->p1);
	WARN_ON(reduced_clock->p2 != clock->p2);

	/*
	 * Bspec:
	 * "[Almador Errata}: For the correct operation of the muxed DVO pins
	 *  (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
	 *  GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
	 *  Enable) must be set to “1” in both the DPLL A Control Register
	 *  (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
	 *
	 * For simplicity We simply keep both bits always enabled in
	 * both DPLLS. The spec says we should disable the DVO 2X clock
	 * when not needed, but this seems to work fine in practice.
	 */
	if (display->platform.i830 ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
		dpll |= DPLL_DVO_2X_MODE;

	/* reference clock: spread spectrum for SSC LVDS, DREFCLK otherwise */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(display))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	return dpll;
}
1139 
1140 static void i8xx_compute_dpll(struct intel_crtc_state *crtc_state,
1141 			      const struct dpll *clock,
1142 			      const struct dpll *reduced_clock)
1143 {
1144 	struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
1145 
1146 	hw_state->fp0 = i9xx_dpll_compute_fp(clock);
1147 	hw_state->fp1 = i9xx_dpll_compute_fp(reduced_clock);
1148 
1149 	hw_state->dpll = i8xx_dpll(crtc_state, clock, reduced_clock);
1150 }
1151 
1152 static int hsw_crtc_compute_clock(struct intel_atomic_state *state,
1153 				  struct intel_crtc *crtc)
1154 {
1155 	struct intel_display *display = to_intel_display(state);
1156 	struct intel_crtc_state *crtc_state =
1157 		intel_atomic_get_new_crtc_state(state, crtc);
1158 	struct intel_encoder *encoder =
1159 		intel_get_crtc_new_encoder(state, crtc_state);
1160 	int ret;
1161 
1162 	if (DISPLAY_VER(display) < 11 &&
1163 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1164 		return 0;
1165 
1166 	ret = intel_dpll_compute(state, crtc, encoder);
1167 	if (ret)
1168 		return ret;
1169 
1170 	/* FIXME this is a mess */
1171 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1172 		return 0;
1173 
1174 	/* CRT dotclock is determined via other means */
1175 	if (!crtc_state->has_pch_encoder)
1176 		crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1177 
1178 	return 0;
1179 }
1180 
1181 static int hsw_crtc_get_dpll(struct intel_atomic_state *state,
1182 			     struct intel_crtc *crtc)
1183 {
1184 	struct intel_display *display = to_intel_display(state);
1185 	struct intel_crtc_state *crtc_state =
1186 		intel_atomic_get_new_crtc_state(state, crtc);
1187 	struct intel_encoder *encoder =
1188 		intel_get_crtc_new_encoder(state, crtc_state);
1189 
1190 	if (DISPLAY_VER(display) < 11 &&
1191 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1192 		return 0;
1193 
1194 	return intel_dpll_reserve(state, crtc, encoder);
1195 }
1196 
1197 static int dg2_crtc_compute_clock(struct intel_atomic_state *state,
1198 				  struct intel_crtc *crtc)
1199 {
1200 	struct intel_crtc_state *crtc_state =
1201 		intel_atomic_get_new_crtc_state(state, crtc);
1202 	struct intel_encoder *encoder =
1203 		intel_get_crtc_new_encoder(state, crtc_state);
1204 	int ret;
1205 
1206 	ret = intel_mpllb_calc_state(crtc_state, encoder);
1207 	if (ret)
1208 		return ret;
1209 
1210 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1211 
1212 	return 0;
1213 }
1214 
1215 static int ilk_fb_cb_factor(const struct intel_crtc_state *crtc_state)
1216 {
1217 	struct intel_display *display = to_intel_display(crtc_state);
1218 
1219 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
1220 	    ((intel_panel_use_ssc(display) && display->vbt.lvds_ssc_freq == 100000) ||
1221 	     (HAS_PCH_IBX(display) && intel_is_dual_link_lvds(display))))
1222 		return 25;
1223 
1224 	if (crtc_state->sdvo_tv_clock)
1225 		return 20;
1226 
1227 	return 21;
1228 }
1229 
1230 static bool ilk_needs_fb_cb_tune(const struct dpll *dpll, int factor)
1231 {
1232 	return dpll->m < factor * dpll->n;
1233 }
1234 
1235 static u32 ilk_dpll_compute_fp(const struct dpll *clock, int factor)
1236 {
1237 	u32 fp;
1238 
1239 	fp = i9xx_dpll_compute_fp(clock);
1240 	if (ilk_needs_fb_cb_tune(clock, factor))
1241 		fp |= FP_CB_TUNE;
1242 
1243 	return fp;
1244 }
1245 
/*
 * Build the PCH DPLL control register value for ILK-IVB.
 * @clock and @reduced_clock may only differ in p1 (separate FPA1 divider).
 */
static u32 ilk_dpll(const struct intel_crtc_state *crtc_state,
		    const struct dpll *clock,
		    const struct dpll *reduced_clock)
{
	struct intel_display *display = to_intel_display(crtc_state);
	u32 dpll;

	dpll = DPLL_VCO_ENABLE;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	/* pixel multiplier is programmed as (value - 1) */
	dpll |= (crtc_state->pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/*
	 * The high speed IO clock is only really required for
	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
	 * possible to share the DPLL between CRT and HDMI. Enabling
	 * the clock needlessly does no real harm, except use up a
	 * bit of power potentially.
	 *
	 * We'll limit this to IVB with 3 pipes, since it has only two
	 * DPLLs and so DPLL sharing is the only way to get three pipes
	 * driving PCH ports at the same time. On SNB we could do this,
	 * and potentially avoid enabling the second DPLL, but it's not
	 * clear if it's a win or loss power wise. No point in doing
	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
	 */
	if (INTEL_NUM_PIPES(display) == 3 &&
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}
	/* p2 has no separate reduced divider */
	WARN_ON(reduced_clock->p2 != clock->p2);

	/* reference clock: spread spectrum for SSC LVDS, DREFCLK otherwise */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(display))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	return dpll;
}
1317 
1318 static void ilk_compute_dpll(struct intel_crtc_state *crtc_state,
1319 			     const struct dpll *clock,
1320 			     const struct dpll *reduced_clock)
1321 {
1322 	struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
1323 	int factor = ilk_fb_cb_factor(crtc_state);
1324 
1325 	hw_state->fp0 = ilk_dpll_compute_fp(clock, factor);
1326 	hw_state->fp1 = ilk_dpll_compute_fp(reduced_clock, factor);
1327 
1328 	hw_state->dpll = ilk_dpll(crtc_state, clock, reduced_clock);
1329 }
1330 
/*
 * Compute PCH PLL dividers and state for ILK-IVB. The refclk defaults to
 * 120 MHz but is replaced by the VBT SSC frequency for SSC LVDS, which in
 * turn selects the matching divider limits.
 */
static int ilk_crtc_compute_clock(struct intel_atomic_state *state,
				  struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_limit *limit;
	int refclk = 120000;
	int ret;

	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
	if (!crtc_state->has_pch_encoder)
		return 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(display)) {
			drm_dbg_kms(display->drm,
				    "using SSC reference clock of %d kHz\n",
				    display->vbt.lvds_ssc_freq);
			refclk = display->vbt.lvds_ssc_freq;
		}

		/* limits depend on link config and on the (possibly SSC) refclk */
		if (intel_is_dual_link_lvds(display)) {
			if (refclk == 100000)
				limit = &ilk_limits_dual_lvds_100m;
			else
				limit = &ilk_limits_dual_lvds;
		} else {
			if (refclk == 100000)
				limit = &ilk_limits_single_lvds_100m;
			else
				limit = &ilk_limits_single_lvds;
		}
	} else {
		limit = &ilk_limits_dac;
	}

	/* honour dividers forced by the user/debugfs via clock_set */
	if (!crtc_state->clock_set &&
	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll))
		return -EINVAL;

	i9xx_calc_dpll_params(refclk, &crtc_state->dpll);

	ilk_compute_dpll(crtc_state, &crtc_state->dpll,
			 &crtc_state->dpll);

	ret = intel_dpll_compute(state, crtc, NULL);
	if (ret)
		return ret;

	crtc_state->port_clock = crtc_state->dpll.dot;
	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);

	return ret;
}
1387 
1388 static int ilk_crtc_get_dpll(struct intel_atomic_state *state,
1389 			     struct intel_crtc *crtc)
1390 {
1391 	struct intel_crtc_state *crtc_state =
1392 		intel_atomic_get_new_crtc_state(state, crtc);
1393 
1394 	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
1395 	if (!crtc_state->has_pch_encoder)
1396 		return 0;
1397 
1398 	return intel_dpll_reserve(state, crtc, NULL);
1399 }
1400 
1401 static u32 vlv_dpll(const struct intel_crtc_state *crtc_state)
1402 {
1403 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1404 	u32 dpll;
1405 
1406 	dpll = DPLL_INTEGRATED_REF_CLK_VLV |
1407 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1408 
1409 	if (crtc->pipe != PIPE_A)
1410 		dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
1411 
1412 	/* DPLL not used with DSI, but still need the rest set up */
1413 	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1414 		dpll |= DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV;
1415 
1416 	return dpll;
1417 }
1418 
1419 void vlv_compute_dpll(struct intel_crtc_state *crtc_state)
1420 {
1421 	struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
1422 
1423 	hw_state->dpll = vlv_dpll(crtc_state);
1424 	hw_state->dpll_md = i965_dpll_md(crtc_state);
1425 }
1426 
1427 static u32 chv_dpll(const struct intel_crtc_state *crtc_state)
1428 {
1429 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1430 	u32 dpll;
1431 
1432 	dpll = DPLL_SSC_REF_CLK_CHV |
1433 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1434 
1435 	if (crtc->pipe != PIPE_A)
1436 		dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
1437 
1438 	/* DPLL not used with DSI, but still need the rest set up */
1439 	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1440 		dpll |= DPLL_VCO_ENABLE;
1441 
1442 	return dpll;
1443 }
1444 
1445 void chv_compute_dpll(struct intel_crtc_state *crtc_state)
1446 {
1447 	struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
1448 
1449 	hw_state->dpll = chv_dpll(crtc_state);
1450 	hw_state->dpll_md = i965_dpll_md(crtc_state);
1451 }
1452 
1453 static int chv_crtc_compute_clock(struct intel_atomic_state *state,
1454 				  struct intel_crtc *crtc)
1455 {
1456 	struct intel_crtc_state *crtc_state =
1457 		intel_atomic_get_new_crtc_state(state, crtc);
1458 	const struct intel_limit *limit = &intel_limits_chv;
1459 	int refclk = 100000;
1460 
1461 	if (!crtc_state->clock_set &&
1462 	    !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1463 				refclk, NULL, &crtc_state->dpll))
1464 		return -EINVAL;
1465 
1466 	chv_calc_dpll_params(refclk, &crtc_state->dpll);
1467 
1468 	chv_compute_dpll(crtc_state);
1469 
1470 	/* FIXME this is a mess */
1471 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1472 		return 0;
1473 
1474 	crtc_state->port_clock = crtc_state->dpll.dot;
1475 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1476 
1477 	return 0;
1478 }
1479 
1480 static int vlv_crtc_compute_clock(struct intel_atomic_state *state,
1481 				  struct intel_crtc *crtc)
1482 {
1483 	struct intel_crtc_state *crtc_state =
1484 		intel_atomic_get_new_crtc_state(state, crtc);
1485 	const struct intel_limit *limit = &intel_limits_vlv;
1486 	int refclk = 100000;
1487 
1488 	if (!crtc_state->clock_set &&
1489 	    !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1490 				refclk, NULL, &crtc_state->dpll))
1491 		return -EINVAL;
1492 
1493 	vlv_calc_dpll_params(refclk, &crtc_state->dpll);
1494 
1495 	vlv_compute_dpll(crtc_state);
1496 
1497 	/* FIXME this is a mess */
1498 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1499 		return 0;
1500 
1501 	crtc_state->port_clock = crtc_state->dpll.dot;
1502 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1503 
1504 	return 0;
1505 }
1506 
/*
 * Compute PLL dividers and state for g4x pipes. Divider limits are picked
 * per output type; LVDS may additionally override the 96 MHz refclk with
 * the VBT SSC frequency.
 */
static int g4x_crtc_compute_clock(struct intel_atomic_state *state,
				  struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_limit *limit;
	int refclk = 96000;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(display)) {
			refclk = display->vbt.lvds_ssc_freq;
			drm_dbg_kms(display->drm,
				    "using SSC reference clock of %d kHz\n",
				    refclk);
		}

		if (intel_is_dual_link_lvds(display))
			limit = &intel_limits_g4x_dual_channel_lvds;
		else
			limit = &intel_limits_g4x_single_channel_lvds;
	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
		limit = &intel_limits_g4x_hdmi;
	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
		limit = &intel_limits_g4x_sdvo;
	} else {
		/* The option is for other outputs */
		limit = &intel_limits_i9xx_sdvo;
	}

	/* honour dividers forced by the user/debugfs via clock_set */
	if (!crtc_state->clock_set &&
	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll))
		return -EINVAL;

	i9xx_calc_dpll_params(refclk, &crtc_state->dpll);

	i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
			  &crtc_state->dpll);

	crtc_state->port_clock = crtc_state->dpll.dot;
	/* FIXME this is a mess */
	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_TVOUT))
		crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);

	return 0;
}
1555 
1556 static int pnv_crtc_compute_clock(struct intel_atomic_state *state,
1557 				  struct intel_crtc *crtc)
1558 {
1559 	struct intel_display *display = to_intel_display(state);
1560 	struct intel_crtc_state *crtc_state =
1561 		intel_atomic_get_new_crtc_state(state, crtc);
1562 	const struct intel_limit *limit;
1563 	int refclk = 96000;
1564 
1565 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1566 		if (intel_panel_use_ssc(display)) {
1567 			refclk = display->vbt.lvds_ssc_freq;
1568 			drm_dbg_kms(display->drm,
1569 				    "using SSC reference clock of %d kHz\n",
1570 				    refclk);
1571 		}
1572 
1573 		limit = &pnv_limits_lvds;
1574 	} else {
1575 		limit = &pnv_limits_sdvo;
1576 	}
1577 
1578 	if (!crtc_state->clock_set &&
1579 	    !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1580 				refclk, NULL, &crtc_state->dpll))
1581 		return -EINVAL;
1582 
1583 	pnv_calc_dpll_params(refclk, &crtc_state->dpll);
1584 
1585 	i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
1586 			  &crtc_state->dpll);
1587 
1588 	crtc_state->port_clock = crtc_state->dpll.dot;
1589 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1590 
1591 	return 0;
1592 }
1593 
1594 static int i9xx_crtc_compute_clock(struct intel_atomic_state *state,
1595 				   struct intel_crtc *crtc)
1596 {
1597 	struct intel_display *display = to_intel_display(state);
1598 	struct intel_crtc_state *crtc_state =
1599 		intel_atomic_get_new_crtc_state(state, crtc);
1600 	const struct intel_limit *limit;
1601 	int refclk = 96000;
1602 
1603 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1604 		if (intel_panel_use_ssc(display)) {
1605 			refclk = display->vbt.lvds_ssc_freq;
1606 			drm_dbg_kms(display->drm,
1607 				    "using SSC reference clock of %d kHz\n",
1608 				    refclk);
1609 		}
1610 
1611 		limit = &intel_limits_i9xx_lvds;
1612 	} else {
1613 		limit = &intel_limits_i9xx_sdvo;
1614 	}
1615 
1616 	if (!crtc_state->clock_set &&
1617 	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1618 				 refclk, NULL, &crtc_state->dpll))
1619 		return -EINVAL;
1620 
1621 	i9xx_calc_dpll_params(refclk, &crtc_state->dpll);
1622 
1623 	i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
1624 			  &crtc_state->dpll);
1625 
1626 	crtc_state->port_clock = crtc_state->dpll.dot;
1627 	/* FIXME this is a mess */
1628 	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_TVOUT))
1629 		crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1630 
1631 	return 0;
1632 }
1633 
/*
 * Compute PLL dividers and state for gen2 pipes. Divider limits are picked
 * per output type; LVDS may additionally override the 48 MHz refclk with
 * the VBT SSC frequency.
 */
static int i8xx_crtc_compute_clock(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_limit *limit;
	int refclk = 48000;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(display)) {
			refclk = display->vbt.lvds_ssc_freq;
			drm_dbg_kms(display->drm,
				    "using SSC reference clock of %d kHz\n",
				    refclk);
		}

		limit = &intel_limits_i8xx_lvds;
	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
		limit = &intel_limits_i8xx_dvo;
	} else {
		limit = &intel_limits_i8xx_dac;
	}

	/* honour dividers forced by the user/debugfs via clock_set */
	if (!crtc_state->clock_set &&
	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				 refclk, NULL, &crtc_state->dpll))
		return -EINVAL;

	i9xx_calc_dpll_params(refclk, &crtc_state->dpll);

	i8xx_compute_dpll(crtc_state, &crtc_state->dpll,
			  &crtc_state->dpll);

	crtc_state->port_clock = crtc_state->dpll.dot;
	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);

	return 0;
}
1673 
/*
 * Per-platform clock computation vtables; selection happens in
 * intel_dpll_init_clock_hook(). Entries without .crtc_get_dpll
 * don't reserve shared DPLLs.
 */
static const struct intel_dpll_global_funcs xe3plpd_dpll_funcs = {
	.crtc_compute_clock = hsw_crtc_compute_clock,
	.crtc_get_dpll = hsw_crtc_get_dpll,
};

static const struct intel_dpll_global_funcs mtl_dpll_funcs = {
	.crtc_compute_clock = hsw_crtc_compute_clock,
	.crtc_get_dpll = hsw_crtc_get_dpll,
};

static const struct intel_dpll_global_funcs dg2_dpll_funcs = {
	.crtc_compute_clock = dg2_crtc_compute_clock,
};

static const struct intel_dpll_global_funcs hsw_dpll_funcs = {
	.crtc_compute_clock = hsw_crtc_compute_clock,
	.crtc_get_dpll = hsw_crtc_get_dpll,
};

static const struct intel_dpll_global_funcs ilk_dpll_funcs = {
	.crtc_compute_clock = ilk_crtc_compute_clock,
	.crtc_get_dpll = ilk_crtc_get_dpll,
};

static const struct intel_dpll_global_funcs chv_dpll_funcs = {
	.crtc_compute_clock = chv_crtc_compute_clock,
};

static const struct intel_dpll_global_funcs vlv_dpll_funcs = {
	.crtc_compute_clock = vlv_crtc_compute_clock,
};

static const struct intel_dpll_global_funcs g4x_dpll_funcs = {
	.crtc_compute_clock = g4x_crtc_compute_clock,
};

static const struct intel_dpll_global_funcs pnv_dpll_funcs = {
	.crtc_compute_clock = pnv_crtc_compute_clock,
};

static const struct intel_dpll_global_funcs i9xx_dpll_funcs = {
	.crtc_compute_clock = i9xx_crtc_compute_clock,
};

static const struct intel_dpll_global_funcs i8xx_dpll_funcs = {
	.crtc_compute_clock = i8xx_crtc_compute_clock,
};
1721 
1722 int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
1723 				  struct intel_crtc *crtc)
1724 {
1725 	struct intel_display *display = to_intel_display(state);
1726 	struct intel_crtc_state *crtc_state =
1727 		intel_atomic_get_new_crtc_state(state, crtc);
1728 	int ret;
1729 
1730 	drm_WARN_ON(display->drm, !intel_crtc_needs_modeset(crtc_state));
1731 
1732 	memset(&crtc_state->dpll_hw_state, 0,
1733 	       sizeof(crtc_state->dpll_hw_state));
1734 
1735 	if (!crtc_state->hw.enable)
1736 		return 0;
1737 
1738 	ret = display->funcs.dpll->crtc_compute_clock(state, crtc);
1739 	if (ret) {
1740 		drm_dbg_kms(display->drm, "[CRTC:%d:%s] Couldn't calculate DPLL settings\n",
1741 			    crtc->base.base.id, crtc->base.name);
1742 		return ret;
1743 	}
1744 
1745 	return 0;
1746 }
1747 
1748 int intel_dpll_crtc_get_dpll(struct intel_atomic_state *state,
1749 			     struct intel_crtc *crtc)
1750 {
1751 	struct intel_display *display = to_intel_display(state);
1752 	struct intel_crtc_state *crtc_state =
1753 		intel_atomic_get_new_crtc_state(state, crtc);
1754 	int ret;
1755 
1756 	drm_WARN_ON(display->drm, !intel_crtc_needs_modeset(crtc_state));
1757 	drm_WARN_ON(display->drm, !crtc_state->hw.enable && crtc_state->intel_dpll);
1758 
1759 	if (!crtc_state->hw.enable || crtc_state->intel_dpll)
1760 		return 0;
1761 
1762 	if (!display->funcs.dpll->crtc_get_dpll)
1763 		return 0;
1764 
1765 	ret = display->funcs.dpll->crtc_get_dpll(state, crtc);
1766 	if (ret) {
1767 		drm_dbg_kms(display->drm, "[CRTC:%d:%s] Couldn't get a shared DPLL\n",
1768 			    crtc->base.base.id, crtc->base.name);
1769 		return ret;
1770 	}
1771 
1772 	return 0;
1773 }
1774 
/*
 * Select the per-platform clock computation vtable. Checks are ordered
 * roughly newest to oldest platform; keep that order when adding entries.
 */
void
intel_dpll_init_clock_hook(struct intel_display *display)
{
	if (HAS_LT_PHY(display))
		display->funcs.dpll = &xe3plpd_dpll_funcs;
	else if (DISPLAY_VER(display) >= 14)
		display->funcs.dpll = &mtl_dpll_funcs;
	else if (display->platform.dg2)
		display->funcs.dpll = &dg2_dpll_funcs;
	else if (DISPLAY_VER(display) >= 9 || HAS_DDI(display))
		display->funcs.dpll = &hsw_dpll_funcs;
	else if (HAS_PCH_SPLIT(display))
		display->funcs.dpll = &ilk_dpll_funcs;
	else if (display->platform.cherryview)
		display->funcs.dpll = &chv_dpll_funcs;
	else if (display->platform.valleyview)
		display->funcs.dpll = &vlv_dpll_funcs;
	else if (display->platform.g4x)
		display->funcs.dpll = &g4x_dpll_funcs;
	else if (display->platform.pineview)
		display->funcs.dpll = &pnv_dpll_funcs;
	else if (DISPLAY_VER(display) != 2)
		display->funcs.dpll = &i9xx_dpll_funcs;
	else
		display->funcs.dpll = &i8xx_dpll_funcs;
}
1801 
1802 static bool i9xx_has_pps(struct intel_display *display)
1803 {
1804 	if (display->platform.i830)
1805 		return false;
1806 
1807 	return display->platform.pineview || display->platform.mobile;
1808 }
1809 
/*
 * Enable the DPLL for a gen2-4/VLV style pipe: program FP0/FP1, briefly
 * enable VGA mode while updating the dividers, then write DPLL (and
 * DPLL_MD on gen4+) again once the clocks have stabilized.
 */
void i9xx_enable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
	enum pipe pipe = crtc->pipe;
	int i;

	assert_transcoder_disabled(display, crtc_state->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	if (i9xx_has_pps(display))
		assert_pps_unlocked(display, pipe);

	intel_de_write(display, FP0(pipe), hw_state->fp0);
	intel_de_write(display, FP1(pipe), hw_state->fp1);

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	intel_de_write(display, DPLL(display, pipe),
		       hw_state->dpll & ~DPLL_VGA_MODE_DIS);
	intel_de_write(display, DPLL(display, pipe), hw_state->dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(display, DPLL(display, pipe));
	udelay(150);

	if (DISPLAY_VER(display) >= 4) {
		intel_de_write(display, DPLL_MD(display, pipe),
			       hw_state->dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		intel_de_write(display, DPLL(display, pipe), hw_state->dpll);
	}

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		intel_de_write(display, DPLL(display, pipe), hw_state->dpll);
		intel_de_posting_read(display, DPLL(display, pipe));
		udelay(150); /* wait for warmup */
	}
}
1859 
/*
 * Work around PLL B opamp auto-calibration: force-enable the opamp and
 * program a sane value via DPIO instead of the bogus 0x3f it settles on.
 * The register values are opaque magic from the VLV DPIO programming notes.
 */
static void vlv_pllb_recal_opamp(struct intel_display *display,
				 enum dpio_phy phy, enum dpio_channel ch)
{
	u32 tmp;

	/*
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
	 * and set it to a reasonable value instead.
	 */
	tmp = vlv_dpio_read(display->drm, phy, VLV_PLL_DW17(ch));
	tmp &= 0xffffff00;
	tmp |= 0x00000030;
	vlv_dpio_write(display->drm, phy, VLV_PLL_DW17(ch), tmp);

	tmp = vlv_dpio_read(display->drm, phy, VLV_REF_DW11);
	tmp &= 0x00ffffff;
	tmp |= 0x8c000000;
	vlv_dpio_write(display->drm, phy, VLV_REF_DW11, tmp);

	tmp = vlv_dpio_read(display->drm, phy, VLV_PLL_DW17(ch));
	tmp &= 0xffffff00;
	vlv_dpio_write(display->drm, phy, VLV_PLL_DW17(ch), tmp);

	tmp = vlv_dpio_read(display->drm, phy, VLV_REF_DW11);
	tmp &= 0x00ffffff;
	tmp |= 0xb0000000;
	vlv_dpio_write(display->drm, phy, VLV_REF_DW11, tmp);
}
1888 
/*
 * Program the VLV DPIO PHY for the PLL: dividers, loop filter
 * coefficients, and SSC/bend clock source selection. Most magic values
 * come from the "eDP HDMI DPIO driver vbios notes" doc referenced below.
 */
static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	const struct dpll *clock = &crtc_state->dpll;
	enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
	enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
	enum pipe pipe = crtc->pipe;
	u32 tmp, coreclk;

	vlv_dpio_get(display->drm);

	/* See eDP HDMI DPIO driver vbios notes doc */

	/* PLL B needs special handling */
	if (pipe == PIPE_B)
		vlv_pllb_recal_opamp(display, phy, ch);

	/* Set up Tx target for periodic Rcomp update */
	vlv_dpio_write(display->drm, phy, VLV_PCS_DW17_BCAST, 0x0100000f);

	/* Disable target IRef on PLL */
	tmp = vlv_dpio_read(display->drm, phy, VLV_PLL_DW16(ch));
	tmp &= 0x00ffffff;
	vlv_dpio_write(display->drm, phy, VLV_PLL_DW16(ch), tmp);

	/* Disable fast lock */
	vlv_dpio_write(display->drm, phy, VLV_CMN_DW0, 0x610);

	/* Set idtafcrecal before PLL is enabled */
	tmp = DPIO_M1_DIV(clock->m1) |
		DPIO_M2_DIV(clock->m2) |
		DPIO_P1_DIV(clock->p1) |
		DPIO_P2_DIV(clock->p2) |
		DPIO_N_DIV(clock->n) |
		DPIO_K_DIV(1);

	/*
	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
	 * but we don't support that).
	 * Note: don't use the DAC post divider as it seems unstable.
	 */
	tmp |= DPIO_S1_DIV(DPIO_S1_DIV_HDMIDP);
	vlv_dpio_write(display->drm, phy, VLV_PLL_DW3(ch), tmp);

	/* write the dividers again with calibration enabled */
	tmp |= DPIO_ENABLE_CALIBRATION;
	vlv_dpio_write(display->drm, phy, VLV_PLL_DW3(ch), tmp);

	/* Set HBR and RBR LPF coefficients */
	if (crtc_state->port_clock == 162000 ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		vlv_dpio_write(display->drm, phy, VLV_PLL_DW18(ch), 0x009f0003);
	else
		vlv_dpio_write(display->drm, phy, VLV_PLL_DW18(ch), 0x00d0000f);

	if (intel_crtc_has_dp_encoder(crtc_state)) {
		/* Use SSC source */
		if (pipe == PIPE_A)
			vlv_dpio_write(display->drm, phy, VLV_PLL_DW5(ch), 0x0df40000);
		else
			vlv_dpio_write(display->drm, phy, VLV_PLL_DW5(ch), 0x0df70000);
	} else { /* HDMI or VGA */
		/* Use bend source */
		if (pipe == PIPE_A)
			vlv_dpio_write(display->drm, phy, VLV_PLL_DW5(ch), 0x0df70000);
		else
			vlv_dpio_write(display->drm, phy, VLV_PLL_DW5(ch), 0x0df40000);
	}

	coreclk = vlv_dpio_read(display->drm, phy, VLV_PLL_DW7(ch));
	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
	if (intel_crtc_has_dp_encoder(crtc_state))
		coreclk |= 0x01000000;
	vlv_dpio_write(display->drm, phy, VLV_PLL_DW7(ch), coreclk);

	vlv_dpio_write(display->drm, phy, VLV_PLL_DW19(ch), 0x87871000);

	vlv_dpio_put(display->drm);
}
1969 
/*
 * Write the final DPLL control value from the precomputed hw state
 * (including the VCO enable bit) and wait for the PLL to report lock.
 */
static void _vlv_enable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
	enum pipe pipe = crtc->pipe;

	intel_de_write(display, DPLL(display, pipe), hw_state->dpll);
	intel_de_posting_read(display, DPLL(display, pipe));
	udelay(150); /* warmup delay before polling for lock — TODO confirm source of 150us */

	/* the PLL is expected to lock within 1 ms */
	if (intel_de_wait_for_set_ms(display, DPLL(display, pipe), DPLL_LOCK_VLV, 1))
		drm_err(display->drm, "DPLL %d failed to lock\n", pipe);
}
1984 
/*
 * Enable the DPLL for @crtc_state on VLV. The reference clock is enabled
 * first; the VCO itself is only spun up (via vlv_prepare_pll() +
 * _vlv_enable_pll()) when the precomputed hw state has DPLL_VCO_ENABLE
 * set. DPLL_MD is programmed last in either case.
 */
void vlv_enable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
	enum pipe pipe = crtc->pipe;

	assert_transcoder_disabled(display, crtc_state->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	assert_pps_unlocked(display, pipe);

	/* Enable Refclk */
	intel_de_write(display, DPLL(display, pipe),
		       hw_state->dpll & ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));

	if (hw_state->dpll & DPLL_VCO_ENABLE) {
		vlv_prepare_pll(crtc_state);
		_vlv_enable_pll(crtc_state);
	}

	intel_de_write(display, DPLL_MD(display, pipe), hw_state->dpll_md);
	intel_de_posting_read(display, DPLL_MD(display, pipe));
}
2009 
/*
 * Program the CHV DPIO PHY for the PLL described by @crtc_state->dpll:
 * dividers, lock detect threshold, loop filter coefficients (selected by
 * VCO frequency), and kick off AFC recalibration. Called from
 * chv_enable_pll() before the VCO is enabled.
 */
static void chv_prepare_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	const struct dpll *clock = &crtc_state->dpll;
	enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
	enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
	u32 tmp, loopfilter, tribuf_calcntr;
	u32 m2_frac;

	/* m2 carries a 22 bit fractional part; the integer part sits above it */
	m2_frac = clock->m2 & 0x3fffff;

	vlv_dpio_get(display->drm);

	/* p1 and p2 divider */
	vlv_dpio_write(display->drm, phy, CHV_CMN_DW13(ch),
		       DPIO_CHV_S1_DIV(5) |
		       DPIO_CHV_P1_DIV(clock->p1) |
		       DPIO_CHV_P2_DIV(clock->p2) |
		       DPIO_CHV_K_DIV(1));

	/* Feedback post-divider - m2 */
	vlv_dpio_write(display->drm, phy, CHV_PLL_DW0(ch),
		       DPIO_CHV_M2_DIV(clock->m2 >> 22));

	/* Feedback refclk divider - n and m1 */
	vlv_dpio_write(display->drm, phy, CHV_PLL_DW1(ch),
		       DPIO_CHV_M1_DIV(DPIO_CHV_M1_DIV_BY_2) |
		       DPIO_CHV_N_DIV(1));

	/* M2 fraction division */
	vlv_dpio_write(display->drm, phy, CHV_PLL_DW2(ch),
		       DPIO_CHV_M2_FRAC_DIV(m2_frac));

	/* M2 fraction division enable */
	tmp = vlv_dpio_read(display->drm, phy, CHV_PLL_DW3(ch));
	tmp &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
	tmp |= DPIO_CHV_FEEDFWD_GAIN(2);
	if (m2_frac)
		tmp |= DPIO_CHV_FRAC_DIV_EN;
	vlv_dpio_write(display->drm, phy, CHV_PLL_DW3(ch), tmp);

	/* Program digital lock detect threshold */
	tmp = vlv_dpio_read(display->drm, phy, CHV_PLL_DW9(ch));
	tmp &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
		      DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
	tmp |= DPIO_CHV_INT_LOCK_THRESHOLD(0x5);
	/* coarse threshold only when there is no fractional divide */
	if (!m2_frac)
		tmp |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
	vlv_dpio_write(display->drm, phy, CHV_PLL_DW9(ch), tmp);

	/* Loop filter */
	if (clock->vco == 5400000) {
		loopfilter = DPIO_CHV_PROP_COEFF(0x3) |
			DPIO_CHV_INT_COEFF(0x8) |
			DPIO_CHV_GAIN_CTRL(0x1);
		tribuf_calcntr = 0x9;
	} else if (clock->vco <= 6200000) {
		loopfilter = DPIO_CHV_PROP_COEFF(0x5) |
			DPIO_CHV_INT_COEFF(0xB) |
			DPIO_CHV_GAIN_CTRL(0x3);
		tribuf_calcntr = 0x9;
	} else if (clock->vco <= 6480000) {
		loopfilter = DPIO_CHV_PROP_COEFF(0x4) |
			DPIO_CHV_INT_COEFF(0x9) |
			DPIO_CHV_GAIN_CTRL(0x3);
		tribuf_calcntr = 0x8;
	} else {
		/* Not supported. Apply the same limits as in the max case */
		loopfilter = DPIO_CHV_PROP_COEFF(0x4) |
			DPIO_CHV_INT_COEFF(0x9) |
			DPIO_CHV_GAIN_CTRL(0x3);
		tribuf_calcntr = 0;
	}
	vlv_dpio_write(display->drm, phy, CHV_PLL_DW6(ch), loopfilter);

	/* TDC target count, from the tribuf_calcntr chosen above */
	tmp = vlv_dpio_read(display->drm, phy, CHV_PLL_DW8(ch));
	tmp &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
	tmp |= DPIO_CHV_TDC_TARGET_CNT(tribuf_calcntr);
	vlv_dpio_write(display->drm, phy, CHV_PLL_DW8(ch), tmp);

	/* AFC Recal */
	vlv_dpio_write(display->drm, phy, CHV_CMN_DW14(ch),
		       vlv_dpio_read(display->drm, phy, CHV_CMN_DW14(ch)) |
		       DPIO_AFC_RECAL);

	vlv_dpio_put(display->drm);
}
2098 
/*
 * Enable the CHV PLL proper: first enable the 10 bit clock to the
 * display controller, then write the DPLL control value from the
 * precomputed hw state and wait for lock.
 */
static void _chv_enable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
	enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
	enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
	enum pipe pipe = crtc->pipe;
	u32 tmp;

	vlv_dpio_get(display->drm);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(display->drm, phy, CHV_CMN_DW14(ch));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(display->drm, phy, CHV_CMN_DW14(ch), tmp);

	vlv_dpio_put(display->drm);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	intel_de_write(display, DPLL(display, pipe), hw_state->dpll);

	/* Check PLL is locked; expected to happen within 1 ms */
	if (intel_de_wait_for_set_ms(display, DPLL(display, pipe), DPLL_LOCK_VLV, 1))
		drm_err(display->drm, "PLL %d failed to lock\n", pipe);
}
2130 
/*
 * Enable the DPLL for @crtc_state on CHV. Refclk/SSC are enabled first;
 * the VCO is only spun up (via chv_prepare_pll() + _chv_enable_pll())
 * when the precomputed hw state has DPLL_VCO_ENABLE set. DPLL_MD is then
 * programmed, which on pipes B/C needs the WaPixelRepeatModeFixForC0
 * workaround described below.
 */
void chv_enable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
	enum pipe pipe = crtc->pipe;

	assert_transcoder_disabled(display, crtc_state->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	assert_pps_unlocked(display, pipe);

	/* Enable Refclk and SSC */
	intel_de_write(display, DPLL(display, pipe),
		       hw_state->dpll & ~DPLL_VCO_ENABLE);

	if (hw_state->dpll & DPLL_VCO_ENABLE) {
		chv_prepare_pll(crtc_state);
		_chv_enable_pll(crtc_state);
	}

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		intel_de_write(display, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
		intel_de_write(display, DPLL_MD(display, PIPE_B),
			       hw_state->dpll_md);
		intel_de_write(display, CBR4_VLV, 0);
		/* cache the value since DPLL_MD can't be read back for B/C */
		display->state.chv_dpll_md[pipe] = hw_state->dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		drm_WARN_ON(display->drm,
			    (intel_de_read(display, DPLL(display, PIPE_B)) &
			     DPLL_VGA_MODE_DIS) == 0);
	} else {
		intel_de_write(display, DPLL_MD(display, pipe),
			       hw_state->dpll_md);
		intel_de_posting_read(display, DPLL_MD(display, pipe));
	}
}
2178 
2179 /**
2180  * vlv_force_pll_on - forcibly enable just the PLL
2181  * @display: display device
2182  * @pipe: pipe PLL to enable
2183  * @dpll: PLL configuration
2184  *
2185  * Enable the PLL for @pipe using the supplied @dpll config. To be used
2186  * in cases where we need the PLL enabled even when @pipe is not going to
2187  * be enabled.
2188  */
2189 int vlv_force_pll_on(struct intel_display *display, enum pipe pipe,
2190 		     const struct dpll *dpll)
2191 {
2192 	struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
2193 	struct intel_crtc_state *crtc_state;
2194 
2195 	crtc_state = intel_crtc_state_alloc(crtc);
2196 	if (!crtc_state)
2197 		return -ENOMEM;
2198 
2199 	crtc_state->cpu_transcoder = (enum transcoder)pipe;
2200 	crtc_state->pixel_multiplier = 1;
2201 	crtc_state->dpll = *dpll;
2202 	crtc_state->output_types = BIT(INTEL_OUTPUT_EDP);
2203 
2204 	if (display->platform.cherryview) {
2205 		chv_compute_dpll(crtc_state);
2206 		chv_enable_pll(crtc_state);
2207 	} else {
2208 		vlv_compute_dpll(crtc_state);
2209 		vlv_enable_pll(crtc_state);
2210 	}
2211 
2212 	intel_crtc_destroy_state(&crtc->base, &crtc_state->uapi);
2213 
2214 	return 0;
2215 }
2216 
/*
 * Disable the VLV DPLL for @pipe, leaving the reference clock running
 * (and, on pipes other than A, the integrated CRI clock — presumably
 * still needed by the PHY; TODO confirm).
 */
void vlv_disable_pll(struct intel_display *display, enum pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_transcoder_disabled(display, (enum transcoder)pipe);

	val = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	intel_de_write(display, DPLL(display, pipe), val);
	intel_de_posting_read(display, DPLL(display, pipe));
}
2232 
/*
 * Disable the CHV DPLL for @pipe, leaving the SSC reference clock
 * running (and, on pipes other than A, the integrated CRI clock —
 * presumably still needed by the PHY; TODO confirm), then gate the
 * 10 bit clock to the display controller via DPIO.
 */
void chv_disable_pll(struct intel_display *display, enum pipe pipe)
{
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	enum dpio_phy phy = vlv_pipe_to_phy(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_transcoder_disabled(display, (enum transcoder)pipe);

	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	intel_de_write(display, DPLL(display, pipe), val);
	intel_de_posting_read(display, DPLL(display, pipe));

	vlv_dpio_get(display->drm);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(display->drm, phy, CHV_CMN_DW14(ch));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(display->drm, phy, CHV_CMN_DW14(ch), val);

	vlv_dpio_put(display->drm);
}
2259 
/*
 * Disable the i9xx-style DPLL for @crtc_state's pipe. On i830 the PLL is
 * left alone entirely (apparently it must stay running even with the
 * pipe off — NOTE(review): confirm the exact i830 constraint).
 */
void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum pipe pipe = crtc->pipe;

	/* Don't disable pipe or pipe PLLs if needed */
	if (display->platform.i830)
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_transcoder_disabled(display, crtc_state->cpu_transcoder);

	intel_de_write(display, DPLL(display, pipe), DPLL_VGA_MODE_DIS);
	intel_de_posting_read(display, DPLL(display, pipe));
}
2276 
2277 
2278 /**
2279  * vlv_force_pll_off - forcibly disable just the PLL
2280  * @display: display device
2281  * @pipe: pipe PLL to disable
2282  *
2283  * Disable the PLL for @pipe. To be used in cases where we need
2284  * the PLL enabled even when @pipe is not going to be enabled.
2285  */
2286 void vlv_force_pll_off(struct intel_display *display, enum pipe pipe)
2287 {
2288 	if (display->platform.cherryview)
2289 		chv_disable_pll(display, pipe);
2290 	else
2291 		vlv_disable_pll(display, pipe);
2292 }
2293 
2294 /* Only for pre-ILK configs */
2295 static void assert_pll(struct intel_display *display,
2296 		       enum pipe pipe, bool state)
2297 {
2298 	bool cur_state;
2299 
2300 	cur_state = intel_de_read(display, DPLL(display, pipe)) & DPLL_VCO_ENABLE;
2301 	INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
2302 				 "PLL state assertion failure (expected %s, current %s)\n",
2303 				 str_on_off(state), str_on_off(cur_state));
2304 }
2305 
/* WARN unless the pre-ILK PLL for @pipe is enabled */
void assert_pll_enabled(struct intel_display *display, enum pipe pipe)
{
	assert_pll(display, pipe, true);
}
2310 
/* WARN unless the pre-ILK PLL for @pipe is disabled */
void assert_pll_disabled(struct intel_display *display, enum pipe pipe)
{
	assert_pll(display, pipe, false);
}
2315 
/*
 * Report whether two clock values are considered equal, tolerating a
 * difference of at most 1 unit to absorb rounding in the PLL math.
 */
bool intel_dpll_clock_matches(int clock1, int clock2)
{
	int delta = clock1 - clock2;

	return delta >= -1 && delta <= 1;
}
2320