xref: /linux/drivers/gpu/drm/i915/display/intel_dpll.c (revision 815e260a18a3af4dab59025ee99a7156c0e8b5e0)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2020 Intel Corporation
4  */
5 
6 #include <linux/kernel.h>
7 #include <linux/string_helpers.h>
8 
9 #include <drm/drm_print.h>
10 
11 #include "intel_atomic.h"
12 #include "intel_crtc.h"
13 #include "intel_cx0_phy.h"
14 #include "intel_de.h"
15 #include "intel_display.h"
16 #include "intel_display_regs.h"
17 #include "intel_display_types.h"
18 #include "intel_dpio_phy.h"
19 #include "intel_dpll.h"
20 #include "intel_lt_phy.h"
21 #include "intel_lvds.h"
22 #include "intel_lvds_regs.h"
23 #include "intel_panel.h"
24 #include "intel_pps.h"
25 #include "intel_snps_phy.h"
26 #include "vlv_dpio_phy_regs.h"
27 #include "vlv_sideband.h"
28 
29 struct intel_dpll_global_funcs {
30 	int (*crtc_compute_clock)(struct intel_atomic_state *state,
31 				  struct intel_crtc *crtc);
32 	int (*crtc_get_dpll)(struct intel_atomic_state *state,
33 			     struct intel_crtc *crtc);
34 };
35 
36 struct intel_limit {
37 	struct {
38 		int min, max;
39 	} dot, vco, n, m, m1, m2, p, p1;
40 
41 	struct {
42 		int dot_limit;
43 		int p2_slow, p2_fast;
44 	} p2;
45 };
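
/*
 * Each divider field above is a [min, max] range checked by
 * intel_pll_is_valid(). The p2 block picks between two post dividers:
 * dot clocks below p2.dot_limit use p2_slow and faster ones use p2_fast,
 * except for LVDS where the choice follows single vs. dual link instead
 * (see i9xx_select_p2_div()).
 */
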
46 static const struct intel_limit intel_limits_i8xx_dac = {
47 	.dot = { .min = 25000, .max = 350000 },
48 	.vco = { .min = 908000, .max = 1512000 },
49 	.n = { .min = 2, .max = 16 },
50 	.m = { .min = 96, .max = 140 },
51 	.m1 = { .min = 18, .max = 26 },
52 	.m2 = { .min = 6, .max = 16 },
53 	.p = { .min = 4, .max = 128 },
54 	.p1 = { .min = 2, .max = 33 },
55 	.p2 = { .dot_limit = 165000,
56 		.p2_slow = 4, .p2_fast = 2 },
57 };
58 
59 static const struct intel_limit intel_limits_i8xx_dvo = {
60 	.dot = { .min = 25000, .max = 350000 },
61 	.vco = { .min = 908000, .max = 1512000 },
62 	.n = { .min = 2, .max = 16 },
63 	.m = { .min = 96, .max = 140 },
64 	.m1 = { .min = 18, .max = 26 },
65 	.m2 = { .min = 6, .max = 16 },
66 	.p = { .min = 4, .max = 128 },
67 	.p1 = { .min = 2, .max = 33 },
68 	.p2 = { .dot_limit = 165000,
69 		.p2_slow = 4, .p2_fast = 4 },
70 };
71 
72 static const struct intel_limit intel_limits_i8xx_lvds = {
73 	.dot = { .min = 25000, .max = 350000 },
74 	.vco = { .min = 908000, .max = 1512000 },
75 	.n = { .min = 2, .max = 16 },
76 	.m = { .min = 96, .max = 140 },
77 	.m1 = { .min = 18, .max = 26 },
78 	.m2 = { .min = 6, .max = 16 },
79 	.p = { .min = 4, .max = 128 },
80 	.p1 = { .min = 1, .max = 6 },
81 	.p2 = { .dot_limit = 165000,
82 		.p2_slow = 14, .p2_fast = 7 },
83 };
84 
85 static const struct intel_limit intel_limits_i9xx_sdvo = {
86 	.dot = { .min = 20000, .max = 400000 },
87 	.vco = { .min = 1400000, .max = 2800000 },
88 	.n = { .min = 1, .max = 6 },
89 	.m = { .min = 70, .max = 120 },
90 	.m1 = { .min = 8, .max = 18 },
91 	.m2 = { .min = 3, .max = 7 },
92 	.p = { .min = 5, .max = 80 },
93 	.p1 = { .min = 1, .max = 8 },
94 	.p2 = { .dot_limit = 200000,
95 		.p2_slow = 10, .p2_fast = 5 },
96 };
97 
98 static const struct intel_limit intel_limits_i9xx_lvds = {
99 	.dot = { .min = 20000, .max = 400000 },
100 	.vco = { .min = 1400000, .max = 2800000 },
101 	.n = { .min = 1, .max = 6 },
102 	.m = { .min = 70, .max = 120 },
103 	.m1 = { .min = 8, .max = 18 },
104 	.m2 = { .min = 3, .max = 7 },
105 	.p = { .min = 7, .max = 98 },
106 	.p1 = { .min = 1, .max = 8 },
107 	.p2 = { .dot_limit = 112000,
108 		.p2_slow = 14, .p2_fast = 7 },
109 };
110 
111 
112 static const struct intel_limit intel_limits_g4x_sdvo = {
113 	.dot = { .min = 25000, .max = 270000 },
114 	.vco = { .min = 1750000, .max = 3500000},
115 	.n = { .min = 1, .max = 4 },
116 	.m = { .min = 104, .max = 138 },
117 	.m1 = { .min = 17, .max = 23 },
118 	.m2 = { .min = 5, .max = 11 },
119 	.p = { .min = 10, .max = 30 },
120 	.p1 = { .min = 1, .max = 3},
121 	.p2 = { .dot_limit = 270000,
122 		.p2_slow = 10,
123 		.p2_fast = 10
124 	},
125 };
126 
127 static const struct intel_limit intel_limits_g4x_hdmi = {
128 	.dot = { .min = 22000, .max = 400000 },
129 	.vco = { .min = 1750000, .max = 3500000},
130 	.n = { .min = 1, .max = 4 },
131 	.m = { .min = 104, .max = 138 },
132 	.m1 = { .min = 16, .max = 23 },
133 	.m2 = { .min = 5, .max = 11 },
134 	.p = { .min = 5, .max = 80 },
135 	.p1 = { .min = 1, .max = 8},
136 	.p2 = { .dot_limit = 165000,
137 		.p2_slow = 10, .p2_fast = 5 },
138 };
139 
140 static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
141 	.dot = { .min = 20000, .max = 115000 },
142 	.vco = { .min = 1750000, .max = 3500000 },
143 	.n = { .min = 1, .max = 3 },
144 	.m = { .min = 104, .max = 138 },
145 	.m1 = { .min = 17, .max = 23 },
146 	.m2 = { .min = 5, .max = 11 },
147 	.p = { .min = 28, .max = 112 },
148 	.p1 = { .min = 2, .max = 8 },
149 	.p2 = { .dot_limit = 0,
150 		.p2_slow = 14, .p2_fast = 14
151 	},
152 };
153 
154 static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
155 	.dot = { .min = 80000, .max = 224000 },
156 	.vco = { .min = 1750000, .max = 3500000 },
157 	.n = { .min = 1, .max = 3 },
158 	.m = { .min = 104, .max = 138 },
159 	.m1 = { .min = 17, .max = 23 },
160 	.m2 = { .min = 5, .max = 11 },
161 	.p = { .min = 14, .max = 42 },
162 	.p1 = { .min = 2, .max = 6 },
163 	.p2 = { .dot_limit = 0,
164 		.p2_slow = 7, .p2_fast = 7
165 	},
166 };
167 
168 static const struct intel_limit pnv_limits_sdvo = {
169 	.dot = { .min = 20000, .max = 400000},
170 	.vco = { .min = 1700000, .max = 3500000 },
171 	/* Pineview's N counter is a ring counter */
172 	.n = { .min = 3, .max = 6 },
173 	.m = { .min = 2, .max = 256 },
174 	/* Pineview only has one combined m divider, which we treat as m2. */
175 	.m1 = { .min = 0, .max = 0 },
176 	.m2 = { .min = 0, .max = 254 },
177 	.p = { .min = 5, .max = 80 },
178 	.p1 = { .min = 1, .max = 8 },
179 	.p2 = { .dot_limit = 200000,
180 		.p2_slow = 10, .p2_fast = 5 },
181 };
182 
183 static const struct intel_limit pnv_limits_lvds = {
184 	.dot = { .min = 20000, .max = 400000 },
185 	.vco = { .min = 1700000, .max = 3500000 },
186 	.n = { .min = 3, .max = 6 },
187 	.m = { .min = 2, .max = 256 },
188 	.m1 = { .min = 0, .max = 0 },
189 	.m2 = { .min = 0, .max = 254 },
190 	.p = { .min = 7, .max = 112 },
191 	.p1 = { .min = 1, .max = 8 },
192 	.p2 = { .dot_limit = 112000,
193 		.p2_slow = 14, .p2_fast = 14 },
194 };
195 
196 /* Ironlake / Sandybridge
197  *
198  * We calculate clock using (register_value + 2) for N/M1/M2, so here
199  * the range value for them is (actual_value - 2).
200  */
201 static const struct intel_limit ilk_limits_dac = {
202 	.dot = { .min = 25000, .max = 350000 },
203 	.vco = { .min = 1760000, .max = 3510000 },
204 	.n = { .min = 1, .max = 5 },
205 	.m = { .min = 79, .max = 127 },
206 	.m1 = { .min = 12, .max = 22 },
207 	.m2 = { .min = 5, .max = 9 },
208 	.p = { .min = 5, .max = 80 },
209 	.p1 = { .min = 1, .max = 8 },
210 	.p2 = { .dot_limit = 225000,
211 		.p2_slow = 10, .p2_fast = 5 },
212 };
213 
214 static const struct intel_limit ilk_limits_single_lvds = {
215 	.dot = { .min = 25000, .max = 350000 },
216 	.vco = { .min = 1760000, .max = 3510000 },
217 	.n = { .min = 1, .max = 3 },
218 	.m = { .min = 79, .max = 118 },
219 	.m1 = { .min = 12, .max = 22 },
220 	.m2 = { .min = 5, .max = 9 },
221 	.p = { .min = 28, .max = 112 },
222 	.p1 = { .min = 2, .max = 8 },
223 	.p2 = { .dot_limit = 225000,
224 		.p2_slow = 14, .p2_fast = 14 },
225 };
226 
227 static const struct intel_limit ilk_limits_dual_lvds = {
228 	.dot = { .min = 25000, .max = 350000 },
229 	.vco = { .min = 1760000, .max = 3510000 },
230 	.n = { .min = 1, .max = 3 },
231 	.m = { .min = 79, .max = 127 },
232 	.m1 = { .min = 12, .max = 22 },
233 	.m2 = { .min = 5, .max = 9 },
234 	.p = { .min = 14, .max = 56 },
235 	.p1 = { .min = 2, .max = 8 },
236 	.p2 = { .dot_limit = 225000,
237 		.p2_slow = 7, .p2_fast = 7 },
238 };
239 
240 /* LVDS 100 MHz refclk limits. */
241 static const struct intel_limit ilk_limits_single_lvds_100m = {
242 	.dot = { .min = 25000, .max = 350000 },
243 	.vco = { .min = 1760000, .max = 3510000 },
244 	.n = { .min = 1, .max = 2 },
245 	.m = { .min = 79, .max = 126 },
246 	.m1 = { .min = 12, .max = 22 },
247 	.m2 = { .min = 5, .max = 9 },
248 	.p = { .min = 28, .max = 112 },
249 	.p1 = { .min = 2, .max = 8 },
250 	.p2 = { .dot_limit = 225000,
251 		.p2_slow = 14, .p2_fast = 14 },
252 };
253 
254 static const struct intel_limit ilk_limits_dual_lvds_100m = {
255 	.dot = { .min = 25000, .max = 350000 },
256 	.vco = { .min = 1760000, .max = 3510000 },
257 	.n = { .min = 1, .max = 3 },
258 	.m = { .min = 79, .max = 126 },
259 	.m1 = { .min = 12, .max = 22 },
260 	.m2 = { .min = 5, .max = 9 },
261 	.p = { .min = 14, .max = 42 },
262 	.p1 = { .min = 2, .max = 6 },
263 	.p2 = { .dot_limit = 225000,
264 		.p2_slow = 7, .p2_fast = 7 },
265 };
266 
267 static const struct intel_limit intel_limits_vlv = {
268 	 /*
269 	  * These are based on the data rate limits (measured in fast clocks)
270 	  * since those are the strictest limits we have. The fast
271 	  * clock and actual rate limits are more relaxed, so checking
272 	  * them would make no difference.
273 	  */
274 	.dot = { .min = 25000, .max = 270000 },
275 	.vco = { .min = 4000000, .max = 6000000 },
276 	.n = { .min = 1, .max = 7 },
277 	.m1 = { .min = 2, .max = 3 },
278 	.m2 = { .min = 11, .max = 156 },
279 	.p1 = { .min = 2, .max = 3 },
280 	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
281 };
282 
283 static const struct intel_limit intel_limits_chv = {
284 	/*
285 	 * These are based on the data rate limits (measured in fast clocks)
286 	 * since those are the strictest limits we have.  The fast
287 	 * clock and actual rate limits are more relaxed, so checking
288 	 * them would make no difference.
289 	 */
290 	.dot = { .min = 25000, .max = 540000 },
291 	.vco = { .min = 4800000, .max = 6480000 },
292 	.n = { .min = 1, .max = 1 },
293 	.m1 = { .min = 2, .max = 2 },
294 	.m2 = { .min = 24 << 22, .max = 175 << 22 },
295 	.p1 = { .min = 2, .max = 4 },
296 	.p2 = {	.p2_slow = 1, .p2_fast = 14 },
297 };
298 
299 static const struct intel_limit intel_limits_bxt = {
300 	.dot = { .min = 25000, .max = 594000 },
301 	.vco = { .min = 4800000, .max = 6700000 },
302 	.n = { .min = 1, .max = 1 },
303 	.m1 = { .min = 2, .max = 2 },
304 	/* FIXME: find real m2 limits */
305 	.m2 = { .min = 2 << 22, .max = 255 << 22 },
306 	.p1 = { .min = 2, .max = 4 },
307 	.p2 = { .p2_slow = 1, .p2_fast = 20 },
308 };
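
/*
 * Note that the VLV/CHV/BXT tables above leave .dot_limit, .m and .p unset:
 * intel_pll_is_valid() skips the m and p range checks on those platforms,
 * and their PLL searches step p2 directly from p2_fast down to p2_slow
 * rather than consulting a dot_limit threshold.
 */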
309 
310 /*
311  * Platform specific helpers to calculate the port PLL loopback- (clock.m),
312  * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
313  * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
314  * The helpers' return value is the rate of the clock that is fed to the
315  * display engine's pipe which can be the above fast dot clock rate or a
316  * divided-down version of it.
317  */
318 /* m1 is reserved as 0 in Pineview, n is a ring counter */
319 static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
320 {
321 	clock->m = clock->m2 + 2;
322 	clock->p = clock->p1 * clock->p2;
323 
324 	clock->vco = clock->n == 0 ? 0 :
325 		DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
326 	clock->dot = clock->p == 0 ? 0 :
327 		DIV_ROUND_CLOSEST(clock->vco, clock->p);
328 
329 	return clock->dot;
330 }
331 
332 static u32 i9xx_dpll_compute_m(const struct dpll *dpll)
333 {
334 	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
335 }
336 
337 int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
338 {
339 	clock->m = i9xx_dpll_compute_m(clock);
340 	clock->p = clock->p1 * clock->p2;
341 
342 	clock->vco = clock->n + 2 == 0 ? 0 :
343 		DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
344 	clock->dot = clock->p == 0 ? 0 :
345 		DIV_ROUND_CLOSEST(clock->vco, clock->p);
346 
347 	return clock->dot;
348 }
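
/*
 * Worked example of the formula above (illustrative divider values only,
 * not taken from any platform table): with refclk = 96000 kHz, n = 2,
 * m1 = 14, m2 = 6, p1 = 2, p2 = 10:
 *   m   = 5 * (14 + 2) + (6 + 2) = 88
 *   vco = 96000 * 88 / (2 + 2)   = 2112000 kHz
 *   dot = 2112000 / (2 * 10)     = 105600 kHz
 */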
349 
350 static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
351 {
352 	clock->m = clock->m1 * clock->m2;
353 	clock->p = clock->p1 * clock->p2 * 5;
354 
355 	clock->vco = clock->n == 0 ? 0 :
356 		DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
357 	clock->dot = clock->p == 0 ? 0 :
358 		DIV_ROUND_CLOSEST(clock->vco, clock->p);
359 
360 	return clock->dot;
361 }
362 
363 int chv_calc_dpll_params(int refclk, struct dpll *clock)
364 {
365 	clock->m = clock->m1 * clock->m2;
366 	clock->p = clock->p1 * clock->p2 * 5;
367 
368 	clock->vco = clock->n == 0 ? 0 :
369 		DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m), clock->n << 22);
370 	clock->dot = clock->p == 0 ? 0 :
371 		DIV_ROUND_CLOSEST(clock->vco, clock->p);
372 
373 	return clock->dot;
374 }
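
/*
 * Note that on CHV (and BXT, which reuses chv_find_best_dpll()) clock->m2
 * carries 22 fractional bits, which is why the m2 limits above are shifted
 * by 22 and why the VCO computation here divides by "clock->n << 22".
 */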
375 
376 static int i9xx_pll_refclk(const struct intel_crtc_state *crtc_state)
377 {
378 	struct intel_display *display = to_intel_display(crtc_state);
379 	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
380 
381 	if ((hw_state->dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
382 		return display->vbt.lvds_ssc_freq;
383 	else if (HAS_PCH_SPLIT(display))
384 		return 120000;
385 	else if (DISPLAY_VER(display) != 2)
386 		return 96000;
387 	else
388 		return 48000;
389 }
390 
391 void i9xx_dpll_get_hw_state(struct intel_crtc *crtc,
392 			    struct intel_dpll_hw_state *dpll_hw_state)
393 {
394 	struct intel_display *display = to_intel_display(crtc);
395 	struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
396 
397 	if (DISPLAY_VER(display) >= 4) {
398 		u32 tmp;
399 
400 		/* No way to read it out on pipes B and C */
401 		if (display->platform.cherryview && crtc->pipe != PIPE_A)
402 			tmp = display->state.chv_dpll_md[crtc->pipe];
403 		else
404 			tmp = intel_de_read(display,
405 					    DPLL_MD(display, crtc->pipe));
406 
407 		hw_state->dpll_md = tmp;
408 	}
409 
410 	hw_state->dpll = intel_de_read(display, DPLL(display, crtc->pipe));
411 
412 	if (!display->platform.valleyview && !display->platform.cherryview) {
413 		hw_state->fp0 = intel_de_read(display, FP0(crtc->pipe));
414 		hw_state->fp1 = intel_de_read(display, FP1(crtc->pipe));
415 	} else {
416 		/* Mask out read-only status bits. */
417 		hw_state->dpll &= ~(DPLL_LOCK_VLV |
418 				    DPLL_PORTC_READY_MASK |
419 				    DPLL_PORTB_READY_MASK);
420 	}
421 }
422 
423 /* Returns the clock of the currently programmed mode of the given pipe. */
424 void i9xx_crtc_clock_get(struct intel_crtc_state *crtc_state)
425 {
426 	struct intel_display *display = to_intel_display(crtc_state);
427 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
428 	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
429 	u32 dpll = hw_state->dpll;
430 	u32 fp;
431 	struct dpll clock;
432 	int port_clock;
433 	int refclk = i9xx_pll_refclk(crtc_state);
434 
435 	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
436 		fp = hw_state->fp0;
437 	else
438 		fp = hw_state->fp1;
439 
440 	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
441 	if (display->platform.pineview) {
442 		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
443 		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
444 	} else {
445 		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
446 		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
447 	}
448 
449 	if (DISPLAY_VER(display) != 2) {
450 		if (display->platform.pineview)
451 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
452 				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
453 		else
454 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
455 			       DPLL_FPA01_P1_POST_DIV_SHIFT);
456 
457 		switch (dpll & DPLL_MODE_MASK) {
458 		case DPLLB_MODE_DAC_SERIAL:
459 			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
460 				5 : 10;
461 			break;
462 		case DPLLB_MODE_LVDS:
463 			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
464 				7 : 14;
465 			break;
466 		default:
467 			drm_dbg_kms(display->drm,
468 				    "Unknown DPLL mode %08x in programmed "
469 				    "mode\n", (int)(dpll & DPLL_MODE_MASK));
470 			return;
471 		}
472 
473 		if (display->platform.pineview)
474 			port_clock = pnv_calc_dpll_params(refclk, &clock);
475 		else
476 			port_clock = i9xx_calc_dpll_params(refclk, &clock);
477 	} else {
478 		enum pipe lvds_pipe;
479 
480 		if (display->platform.i85x &&
481 		    intel_lvds_port_enabled(display, LVDS, &lvds_pipe) &&
482 		    lvds_pipe == crtc->pipe) {
483 			u32 lvds = intel_de_read(display, LVDS);
484 
485 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
486 				       DPLL_FPA01_P1_POST_DIV_SHIFT);
487 
488 			if (lvds & LVDS_CLKB_POWER_UP)
489 				clock.p2 = 7;
490 			else
491 				clock.p2 = 14;
492 		} else {
493 			if (dpll & PLL_P1_DIVIDE_BY_TWO)
494 				clock.p1 = 2;
495 			else {
496 				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
497 					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
498 			}
499 			if (dpll & PLL_P2_DIVIDE_BY_4)
500 				clock.p2 = 4;
501 			else
502 				clock.p2 = 2;
503 		}
504 
505 		port_clock = i9xx_calc_dpll_params(refclk, &clock);
506 	}
507 
508 	/*
509 	 * This value includes pixel_multiplier. We will use
510 	 * port_clock to compute adjusted_mode.crtc_clock in the
511 	 * encoder's get_config() function.
512 	 */
513 	crtc_state->port_clock = port_clock;
514 }
515 
516 void vlv_crtc_clock_get(struct intel_crtc_state *crtc_state)
517 {
518 	struct intel_display *display = to_intel_display(crtc_state);
519 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
520 	enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
521 	enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
522 	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
523 	int refclk = 100000;
524 	struct dpll clock;
525 	u32 tmp;
526 
527 	/* In case of DSI, DPLL will not be used */
528 	if ((hw_state->dpll & DPLL_VCO_ENABLE) == 0)
529 		return;
530 
531 	vlv_dpio_get(display->drm);
532 	tmp = vlv_dpio_read(display->drm, phy, VLV_PLL_DW3(ch));
533 	vlv_dpio_put(display->drm);
534 
535 	clock.m1 = REG_FIELD_GET(DPIO_M1_DIV_MASK, tmp);
536 	clock.m2 = REG_FIELD_GET(DPIO_M2_DIV_MASK, tmp);
537 	clock.n = REG_FIELD_GET(DPIO_N_DIV_MASK, tmp);
538 	clock.p1 = REG_FIELD_GET(DPIO_P1_DIV_MASK, tmp);
539 	clock.p2 = REG_FIELD_GET(DPIO_P2_DIV_MASK, tmp);
540 
541 	crtc_state->port_clock = vlv_calc_dpll_params(refclk, &clock);
542 }
543 
544 void chv_crtc_clock_get(struct intel_crtc_state *crtc_state)
545 {
546 	struct intel_display *display = to_intel_display(crtc_state);
547 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
548 	enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
549 	enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
550 	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
551 	struct dpll clock;
552 	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
553 	int refclk = 100000;
554 
555 	/* In case of DSI, DPLL will not be used */
556 	if ((hw_state->dpll & DPLL_VCO_ENABLE) == 0)
557 		return;
558 
559 	vlv_dpio_get(display->drm);
560 	cmn_dw13 = vlv_dpio_read(display->drm, phy, CHV_CMN_DW13(ch));
561 	pll_dw0 = vlv_dpio_read(display->drm, phy, CHV_PLL_DW0(ch));
562 	pll_dw1 = vlv_dpio_read(display->drm, phy, CHV_PLL_DW1(ch));
563 	pll_dw2 = vlv_dpio_read(display->drm, phy, CHV_PLL_DW2(ch));
564 	pll_dw3 = vlv_dpio_read(display->drm, phy, CHV_PLL_DW3(ch));
565 	vlv_dpio_put(display->drm);
566 
567 	clock.m1 = REG_FIELD_GET(DPIO_CHV_M1_DIV_MASK, pll_dw1) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
568 	clock.m2 = REG_FIELD_GET(DPIO_CHV_M2_DIV_MASK, pll_dw0) << 22;
569 	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
570 		clock.m2 |= REG_FIELD_GET(DPIO_CHV_M2_FRAC_DIV_MASK, pll_dw2);
571 	clock.n = REG_FIELD_GET(DPIO_CHV_N_DIV_MASK, pll_dw1);
572 	clock.p1 = REG_FIELD_GET(DPIO_CHV_P1_DIV_MASK, cmn_dw13);
573 	clock.p2 = REG_FIELD_GET(DPIO_CHV_P2_DIV_MASK, cmn_dw13);
574 
575 	crtc_state->port_clock = chv_calc_dpll_params(refclk, &clock);
576 }
577 
578 /*
579  * Returns whether the given set of divisors are valid for a given refclk with
580  * Returns whether the given set of divisors is valid for a given refclk with
581  */
582 static bool intel_pll_is_valid(struct intel_display *display,
583 			       const struct intel_limit *limit,
584 			       const struct dpll *clock)
585 {
586 	if (clock->n < limit->n.min || limit->n.max < clock->n)
587 		return false;
588 	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
589 		return false;
590 	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
591 		return false;
592 	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
593 		return false;
594 
595 	if (!display->platform.pineview &&
596 	    !display->platform.valleyview && !display->platform.cherryview &&
597 	    !display->platform.broxton && !display->platform.geminilake)
598 		if (clock->m1 <= clock->m2)
599 			return false;
600 
601 	if (!display->platform.valleyview && !display->platform.cherryview &&
602 	    !display->platform.broxton && !display->platform.geminilake) {
603 		if (clock->p < limit->p.min || limit->p.max < clock->p)
604 			return false;
605 		if (clock->m < limit->m.min || limit->m.max < clock->m)
606 			return false;
607 	}
608 
609 	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
610 		return false;
611 	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
612 	 * connector, etc., rather than just a single range.
613 	 */
614 	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
615 		return false;
616 
617 	return true;
618 }
619 
620 static int
621 i9xx_select_p2_div(const struct intel_limit *limit,
622 		   const struct intel_crtc_state *crtc_state,
623 		   int target)
624 {
625 	struct intel_display *display = to_intel_display(crtc_state);
626 
627 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
628 		/*
629 		 * For LVDS just rely on its current settings for dual-channel.
630 		 * We haven't figured out how to reliably set up different
631 		 * single/dual channel state, if we even can.
632 		 */
633 		if (intel_is_dual_link_lvds(display))
634 			return limit->p2.p2_fast;
635 		else
636 			return limit->p2.p2_slow;
637 	} else {
638 		if (target < limit->p2.dot_limit)
639 			return limit->p2.p2_slow;
640 		else
641 			return limit->p2.p2_fast;
642 	}
643 }
644 
645 /*
646  * Returns a set of divisors for the desired target clock with the given
647  * refclk, or FALSE.
648  *
649  * Target and reference clocks are specified in kHz.
650  *
651  * If match_clock is provided, then best_clock P divider must match the P
652  * divider from @match_clock used for LVDS downclocking.
653  */
654 static bool
655 i9xx_find_best_dpll(const struct intel_limit *limit,
656 		    struct intel_crtc_state *crtc_state,
657 		    int target, int refclk,
658 		    const struct dpll *match_clock,
659 		    struct dpll *best_clock)
660 {
661 	struct intel_display *display = to_intel_display(crtc_state);
662 	struct dpll clock;
663 	int err = target;
664 
665 	memset(best_clock, 0, sizeof(*best_clock));
666 
667 	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
668 
669 	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
670 	     clock.m1++) {
671 		for (clock.m2 = limit->m2.min;
672 		     clock.m2 <= limit->m2.max; clock.m2++) {
673 			if (clock.m2 >= clock.m1)
674 				break;
675 			for (clock.n = limit->n.min;
676 			     clock.n <= limit->n.max; clock.n++) {
677 				for (clock.p1 = limit->p1.min;
678 					clock.p1 <= limit->p1.max; clock.p1++) {
679 					int this_err;
680 
681 					i9xx_calc_dpll_params(refclk, &clock);
682 					if (!intel_pll_is_valid(display,
683 								limit,
684 								&clock))
685 						continue;
686 					if (match_clock &&
687 					    clock.p != match_clock->p)
688 						continue;
689 
690 					this_err = abs(clock.dot - target);
691 					if (this_err < err) {
692 						*best_clock = clock;
693 						err = this_err;
694 					}
695 				}
696 			}
697 		}
698 	}
699 
700 	return (err != target);
701 }
702 
703 /*
704  * Returns a set of divisors for the desired target clock with the given
705  * refclk, or FALSE.
706  *
707  * Target and reference clocks are specified in kHz.
708  *
709  * If match_clock is provided, then best_clock P divider must match the P
710  * divider from @match_clock used for LVDS downclocking.
711  */
712 static bool
713 pnv_find_best_dpll(const struct intel_limit *limit,
714 		   struct intel_crtc_state *crtc_state,
715 		   int target, int refclk,
716 		   const struct dpll *match_clock,
717 		   struct dpll *best_clock)
718 {
719 	struct intel_display *display = to_intel_display(crtc_state);
720 	struct dpll clock;
721 	int err = target;
722 
723 	memset(best_clock, 0, sizeof(*best_clock));
724 
725 	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
726 
727 	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
728 	     clock.m1++) {
729 		for (clock.m2 = limit->m2.min;
730 		     clock.m2 <= limit->m2.max; clock.m2++) {
731 			for (clock.n = limit->n.min;
732 			     clock.n <= limit->n.max; clock.n++) {
733 				for (clock.p1 = limit->p1.min;
734 					clock.p1 <= limit->p1.max; clock.p1++) {
735 					int this_err;
736 
737 					pnv_calc_dpll_params(refclk, &clock);
738 					if (!intel_pll_is_valid(display,
739 								limit,
740 								&clock))
741 						continue;
742 					if (match_clock &&
743 					    clock.p != match_clock->p)
744 						continue;
745 
746 					this_err = abs(clock.dot - target);
747 					if (this_err < err) {
748 						*best_clock = clock;
749 						err = this_err;
750 					}
751 				}
752 			}
753 		}
754 	}
755 
756 	return (err != target);
757 }
758 
759 /*
760  * Returns a set of divisors for the desired target clock with the given
761  * refclk, or FALSE.
762  *
763  * Target and reference clocks are specified in kHz.
764  *
765  * If match_clock is provided, then best_clock P divider must match the P
766  * divider from @match_clock used for LVDS downclocking.
767  */
768 static bool
769 g4x_find_best_dpll(const struct intel_limit *limit,
770 		   struct intel_crtc_state *crtc_state,
771 		   int target, int refclk,
772 		   const struct dpll *match_clock,
773 		   struct dpll *best_clock)
774 {
775 	struct intel_display *display = to_intel_display(crtc_state);
776 	struct dpll clock;
777 	int max_n;
778 	bool found = false;
779 	/* approximately equals target * 0.00585 */
780 	int err_most = (target >> 8) + (target >> 9);
781 
782 	memset(best_clock, 0, sizeof(*best_clock));
783 
784 	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
785 
786 	max_n = limit->n.max;
787 	/* based on hardware requirement, prefer smaller n for better precision */
788 	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
789 		/* based on hardware requirement, prefer larger m1,m2 */
790 		for (clock.m1 = limit->m1.max;
791 		     clock.m1 >= limit->m1.min; clock.m1--) {
792 			for (clock.m2 = limit->m2.max;
793 			     clock.m2 >= limit->m2.min; clock.m2--) {
794 				for (clock.p1 = limit->p1.max;
795 				     clock.p1 >= limit->p1.min; clock.p1--) {
796 					int this_err;
797 
798 					i9xx_calc_dpll_params(refclk, &clock);
799 					if (!intel_pll_is_valid(display,
800 								limit,
801 								&clock))
802 						continue;
803 
804 					this_err = abs(clock.dot - target);
805 					if (this_err < err_most) {
806 						*best_clock = clock;
807 						err_most = this_err;
808 						max_n = clock.n;
809 						found = true;
810 					}
811 				}
812 			}
813 		}
814 	}
815 	return found;
816 }
817 
818 /*
819  * Check whether the calculated PLL configuration is better than the best one
820  * found so far. Returns true if so and stores the calculated error in *error_ppm.
821  */
822 static bool vlv_PLL_is_optimal(struct intel_display *display, int target_freq,
823 			       const struct dpll *calculated_clock,
824 			       const struct dpll *best_clock,
825 			       unsigned int best_error_ppm,
826 			       unsigned int *error_ppm)
827 {
828 	/*
829 	 * For CHV ignore the error and consider only the P value.
830 	 * Prefer a bigger P value based on HW requirements.
831 	 */
832 	if (display->platform.cherryview) {
833 		*error_ppm = 0;
834 
835 		return calculated_clock->p > best_clock->p;
836 	}
837 
838 	if (drm_WARN_ON_ONCE(display->drm, !target_freq))
839 		return false;
840 
841 	*error_ppm = div_u64(1000000ULL *
842 				abs(target_freq - calculated_clock->dot),
843 			     target_freq);
844 	/*
845 	 * Prefer a better P value over a better (smaller) error if the error
846 	 * is small. Ensure this preference for future configurations too by
847 	 * setting the error to 0.
848 	 */
849 	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
850 		*error_ppm = 0;
851 
852 		return true;
853 	}
854 
855 	return *error_ppm + 10 < best_error_ppm;
856 }
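
/*
 * Example: with target = 270000 kHz and a computed dot clock of 270135 kHz
 * the error is 1000000 * 135 / 270000 = 500 ppm, so the candidate is only
 * considered better than the current best if that one exceeds 510 ppm.
 */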
857 
858 /*
859  * Returns a set of divisors for the desired target clock with the given
860  * refclk, or FALSE.
861  */
862 static bool
863 vlv_find_best_dpll(const struct intel_limit *limit,
864 		   struct intel_crtc_state *crtc_state,
865 		   int target, int refclk,
866 		   const struct dpll *match_clock,
867 		   struct dpll *best_clock)
868 {
869 	struct intel_display *display = to_intel_display(crtc_state);
870 	struct dpll clock;
871 	unsigned int bestppm = 1000000;
872 	/* min update 19.2 MHz */
873 	int max_n = min(limit->n.max, refclk / 19200);
874 	bool found = false;
875 
876 	memset(best_clock, 0, sizeof(*best_clock));
877 
878 	/* based on hardware requirement, prefer smaller n for better precision */
879 	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
880 		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
881 			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
882 			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
883 				clock.p = clock.p1 * clock.p2 * 5;
884 				/* based on hardware requirement, prefer bigger m1,m2 values */
885 				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
886 					unsigned int ppm;
887 
888 					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
889 								     refclk * clock.m1);
890 
891 					vlv_calc_dpll_params(refclk, &clock);
892 
893 					if (!intel_pll_is_valid(display,
894 								limit,
895 								&clock))
896 						continue;
897 
898 					if (!vlv_PLL_is_optimal(display, target,
899 								&clock,
900 								best_clock,
901 								bestppm, &ppm))
902 						continue;
903 
904 					*best_clock = clock;
905 					bestppm = ppm;
906 					found = true;
907 				}
908 			}
909 		}
910 	}
911 
912 	return found;
913 }
914 
915 /*
916  * Returns a set of divisors for the desired target clock with the given
917  * refclk, or FALSE.
918  */
919 static bool
920 chv_find_best_dpll(const struct intel_limit *limit,
921 		   struct intel_crtc_state *crtc_state,
922 		   int target, int refclk,
923 		   const struct dpll *match_clock,
924 		   struct dpll *best_clock)
925 {
926 	struct intel_display *display = to_intel_display(crtc_state);
927 	unsigned int best_error_ppm;
928 	struct dpll clock;
929 	u64 m2;
930 	bool found = false;
931 
932 	memset(best_clock, 0, sizeof(*best_clock));
933 	best_error_ppm = 1000000;
934 
935 	/*
936 	 * Based on the hardware doc, n is always set to 1 and m1 is always
937 	 * set to 2. If we need to support a 200 MHz refclk, we have to
938 	 * revisit this because n may no longer be 1.
939 	 */
940 	clock.n = 1;
941 	clock.m1 = 2;
942 
943 	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
944 		for (clock.p2 = limit->p2.p2_fast;
945 				clock.p2 >= limit->p2.p2_slow;
946 				clock.p2 -= clock.p2 > 10 ? 2 : 1) {
947 			unsigned int error_ppm;
948 
949 			clock.p = clock.p1 * clock.p2 * 5;
950 
951 			m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
952 						   refclk * clock.m1);
953 
954 			if (m2 > INT_MAX/clock.m1)
955 				continue;
956 
957 			clock.m2 = m2;
958 
959 			chv_calc_dpll_params(refclk, &clock);
960 
961 			if (!intel_pll_is_valid(display, limit, &clock))
962 				continue;
963 
964 			if (!vlv_PLL_is_optimal(display, target, &clock, best_clock,
965 						best_error_ppm, &error_ppm))
966 				continue;
967 
968 			*best_clock = clock;
969 			best_error_ppm = error_ppm;
970 			found = true;
971 		}
972 	}
973 
974 	return found;
975 }
976 
977 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
978 			struct dpll *best_clock)
979 {
980 	const struct intel_limit *limit = &intel_limits_bxt;
981 	int refclk = 100000;
982 
983 	return chv_find_best_dpll(limit, crtc_state,
984 				  crtc_state->port_clock, refclk,
985 				  NULL, best_clock);
986 }
987 
988 u32 i9xx_dpll_compute_fp(const struct dpll *dpll)
989 {
990 	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
991 }
992 
993 static u32 pnv_dpll_compute_fp(const struct dpll *dpll)
994 {
995 	return (1 << dpll->n) << 16 | dpll->m2;
996 }
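
/*
 * i9xx_dpll_compute_fp() packs N at bit 16, M1 at bit 8 and M2 at bit 0.
 * Pineview has no M1 field and stores N as a one-hot value (1 << n), which
 * is why i9xx_crtc_clock_get() decodes it with ffs() - 1.
 */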
997 
998 static u32 i965_dpll_md(const struct intel_crtc_state *crtc_state)
999 {
1000 	return (crtc_state->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
1001 }
1002 
1003 static u32 i9xx_dpll(const struct intel_crtc_state *crtc_state,
1004 		     const struct dpll *clock,
1005 		     const struct dpll *reduced_clock)
1006 {
1007 	struct intel_display *display = to_intel_display(crtc_state);
1008 	u32 dpll;
1009 
1010 	dpll = DPLL_VCO_ENABLE | DPLL_VGA_MODE_DIS;
1011 
1012 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
1013 		dpll |= DPLLB_MODE_LVDS;
1014 	else
1015 		dpll |= DPLLB_MODE_DAC_SERIAL;
1016 
1017 	if (display->platform.i945g || display->platform.i945gm ||
1018 	    display->platform.g33 || display->platform.pineview) {
1019 		dpll |= (crtc_state->pixel_multiplier - 1)
1020 			<< SDVO_MULTIPLIER_SHIFT_HIRES;
1021 	}
1022 
1023 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
1024 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1025 		dpll |= DPLL_SDVO_HIGH_SPEED;
1026 
1027 	if (intel_crtc_has_dp_encoder(crtc_state))
1028 		dpll |= DPLL_SDVO_HIGH_SPEED;
1029 
1030 	/* compute bitmask from p1 value */
1031 	if (display->platform.g4x) {
1032 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
1033 		dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
1034 	} else if (display->platform.pineview) {
1035 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
1036 		WARN_ON(reduced_clock->p1 != clock->p1);
1037 	} else {
1038 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
1039 		WARN_ON(reduced_clock->p1 != clock->p1);
1040 	}
1041 
1042 	switch (clock->p2) {
1043 	case 5:
1044 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
1045 		break;
1046 	case 7:
1047 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
1048 		break;
1049 	case 10:
1050 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
1051 		break;
1052 	case 14:
1053 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
1054 		break;
1055 	}
1056 	WARN_ON(reduced_clock->p2 != clock->p2);
1057 
1058 	if (DISPLAY_VER(display) >= 4)
1059 		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
1060 
1061 	if (crtc_state->sdvo_tv_clock)
1062 		dpll |= PLL_REF_INPUT_TVCLKINBC;
1063 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
1064 		 intel_panel_use_ssc(display))
1065 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
1066 	else
1067 		dpll |= PLL_REF_INPUT_DREFCLK;
1068 
1069 	return dpll;
1070 }
1071 
1072 static void i9xx_compute_dpll(struct intel_crtc_state *crtc_state,
1073 			      const struct dpll *clock,
1074 			      const struct dpll *reduced_clock)
1075 {
1076 	struct intel_display *display = to_intel_display(crtc_state);
1077 	struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
1078 
1079 	if (display->platform.pineview) {
1080 		hw_state->fp0 = pnv_dpll_compute_fp(clock);
1081 		hw_state->fp1 = pnv_dpll_compute_fp(reduced_clock);
1082 	} else {
1083 		hw_state->fp0 = i9xx_dpll_compute_fp(clock);
1084 		hw_state->fp1 = i9xx_dpll_compute_fp(reduced_clock);
1085 	}
1086 
1087 	hw_state->dpll = i9xx_dpll(crtc_state, clock, reduced_clock);
1088 
1089 	if (DISPLAY_VER(display) >= 4)
1090 		hw_state->dpll_md = i965_dpll_md(crtc_state);
1091 }
1092 
1093 static u32 i8xx_dpll(const struct intel_crtc_state *crtc_state,
1094 		     const struct dpll *clock,
1095 		     const struct dpll *reduced_clock)
1096 {
1097 	struct intel_display *display = to_intel_display(crtc_state);
1098 	u32 dpll;
1099 
1100 	dpll = DPLL_VCO_ENABLE | DPLL_VGA_MODE_DIS;
1101 
1102 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1103 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
1104 	} else {
1105 		if (clock->p1 == 2)
1106 			dpll |= PLL_P1_DIVIDE_BY_TWO;
1107 		else
1108 			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
1109 		if (clock->p2 == 4)
1110 			dpll |= PLL_P2_DIVIDE_BY_4;
1111 	}
1112 	WARN_ON(reduced_clock->p1 != clock->p1);
1113 	WARN_ON(reduced_clock->p2 != clock->p2);
1114 
1115 	/*
1116 	 * Bspec:
1117 	 * "[Almador Errata]: For the correct operation of the muxed DVO pins
1118 	 *  (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
1119 	 *  GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
1120 	 *  Enable) must be set to “1” in both the DPLL A Control Register
1121 	 *  (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
1122 	 *
1123 	 * For simplicity we simply keep both bits always enabled in
1124 	 * both DPLLs. The spec says we should disable the DVO 2X clock
1125 	 * when not needed, but this seems to work fine in practice.
1126 	 */
1127 	if (display->platform.i830 ||
1128 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
1129 		dpll |= DPLL_DVO_2X_MODE;
1130 
1131 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
1132 	    intel_panel_use_ssc(display))
1133 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
1134 	else
1135 		dpll |= PLL_REF_INPUT_DREFCLK;
1136 
1137 	return dpll;
1138 }
1139 
1140 static void i8xx_compute_dpll(struct intel_crtc_state *crtc_state,
1141 			      const struct dpll *clock,
1142 			      const struct dpll *reduced_clock)
1143 {
1144 	struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
1145 
1146 	hw_state->fp0 = i9xx_dpll_compute_fp(clock);
1147 	hw_state->fp1 = i9xx_dpll_compute_fp(reduced_clock);
1148 
1149 	hw_state->dpll = i8xx_dpll(crtc_state, clock, reduced_clock);
1150 }
1151 
1152 static int hsw_crtc_compute_clock(struct intel_atomic_state *state,
1153 				  struct intel_crtc *crtc)
1154 {
1155 	struct intel_display *display = to_intel_display(state);
1156 	struct intel_crtc_state *crtc_state =
1157 		intel_atomic_get_new_crtc_state(state, crtc);
1158 	struct intel_encoder *encoder =
1159 		intel_get_crtc_new_encoder(state, crtc_state);
1160 	int ret;
1161 
1162 	if (DISPLAY_VER(display) < 11 &&
1163 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1164 		return 0;
1165 
1166 	ret = intel_dpll_compute(state, crtc, encoder);
1167 	if (ret)
1168 		return ret;
1169 
1170 	/* FIXME this is a mess */
1171 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1172 		return 0;
1173 
1174 	/* CRT dotclock is determined via other means */
1175 	if (!crtc_state->has_pch_encoder)
1176 		crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1177 
1178 	return 0;
1179 }
1180 
1181 static int hsw_crtc_get_dpll(struct intel_atomic_state *state,
1182 			     struct intel_crtc *crtc)
1183 {
1184 	struct intel_display *display = to_intel_display(state);
1185 	struct intel_crtc_state *crtc_state =
1186 		intel_atomic_get_new_crtc_state(state, crtc);
1187 	struct intel_encoder *encoder =
1188 		intel_get_crtc_new_encoder(state, crtc_state);
1189 
1190 	if (DISPLAY_VER(display) < 11 &&
1191 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1192 		return 0;
1193 
1194 	return intel_dpll_reserve(state, crtc, encoder);
1195 }
1196 
1197 static int dg2_crtc_compute_clock(struct intel_atomic_state *state,
1198 				  struct intel_crtc *crtc)
1199 {
1200 	struct intel_crtc_state *crtc_state =
1201 		intel_atomic_get_new_crtc_state(state, crtc);
1202 	struct intel_encoder *encoder =
1203 		intel_get_crtc_new_encoder(state, crtc_state);
1204 	int ret;
1205 
1206 	ret = intel_mpllb_calc_state(crtc_state, encoder);
1207 	if (ret)
1208 		return ret;
1209 
1210 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1211 
1212 	return 0;
1213 }
1214 
1215 static int mtl_crtc_compute_clock(struct intel_atomic_state *state,
1216 				  struct intel_crtc *crtc)
1217 {
1218 	struct intel_crtc_state *crtc_state =
1219 		intel_atomic_get_new_crtc_state(state, crtc);
1220 	struct intel_encoder *encoder =
1221 		intel_get_crtc_new_encoder(state, crtc_state);
1222 	int ret;
1223 
1224 	ret = intel_cx0pll_calc_state(crtc_state, encoder);
1225 	if (ret)
1226 		return ret;
1227 
1228 	/* TODO: Do the readback via intel_dpll_compute() */
1229 	crtc_state->port_clock = intel_cx0pll_calc_port_clock(encoder, &crtc_state->dpll_hw_state.cx0pll);
1230 
1231 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1232 
1233 	return 0;
1234 }
1235 
1236 static int xe3plpd_crtc_compute_clock(struct intel_atomic_state *state,
1237 				      struct intel_crtc *crtc)
1238 {
1239 	struct intel_crtc_state *crtc_state =
1240 		intel_atomic_get_new_crtc_state(state, crtc);
1241 	struct intel_encoder *encoder =
1242 		intel_get_crtc_new_encoder(state, crtc_state);
1243 	int ret;
1244 
1245 	ret = intel_lt_phy_pll_calc_state(crtc_state, encoder);
1246 	if (ret)
1247 		return ret;
1248 
1249 	/* TODO: Do the readback via intel_dpll_compute() */
1250 	crtc_state->port_clock =
1251 			intel_lt_phy_calc_port_clock(encoder, crtc_state);
1252 
1253 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1254 
1255 	return 0;
1256 }
1257 
1258 static int ilk_fb_cb_factor(const struct intel_crtc_state *crtc_state)
1259 {
1260 	struct intel_display *display = to_intel_display(crtc_state);
1261 
1262 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
1263 	    ((intel_panel_use_ssc(display) && display->vbt.lvds_ssc_freq == 100000) ||
1264 	     (HAS_PCH_IBX(display) && intel_is_dual_link_lvds(display))))
1265 		return 25;
1266 
1267 	if (crtc_state->sdvo_tv_clock)
1268 		return 20;
1269 
1270 	return 21;
1271 }
1272 
1273 static bool ilk_needs_fb_cb_tune(const struct dpll *dpll, int factor)
1274 {
1275 	return dpll->m < factor * dpll->n;
1276 }
1277 
1278 static u32 ilk_dpll_compute_fp(const struct dpll *clock, int factor)
1279 {
1280 	u32 fp;
1281 
1282 	fp = i9xx_dpll_compute_fp(clock);
1283 	if (ilk_needs_fb_cb_tune(clock, factor))
1284 		fp |= FP_CB_TUNE;
1285 
1286 	return fp;
1287 }
1288 
1289 static u32 ilk_dpll(const struct intel_crtc_state *crtc_state,
1290 		    const struct dpll *clock,
1291 		    const struct dpll *reduced_clock)
1292 {
1293 	struct intel_display *display = to_intel_display(crtc_state);
1294 	u32 dpll;
1295 
1296 	dpll = DPLL_VCO_ENABLE;
1297 
1298 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
1299 		dpll |= DPLLB_MODE_LVDS;
1300 	else
1301 		dpll |= DPLLB_MODE_DAC_SERIAL;
1302 
1303 	dpll |= (crtc_state->pixel_multiplier - 1)
1304 		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
1305 
1306 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
1307 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1308 		dpll |= DPLL_SDVO_HIGH_SPEED;
1309 
1310 	if (intel_crtc_has_dp_encoder(crtc_state))
1311 		dpll |= DPLL_SDVO_HIGH_SPEED;
1312 
1313 	/*
1314 	 * The high speed IO clock is only really required for
1315 	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
1316 	 * possible to share the DPLL between CRT and HDMI. Enabling
1317 	 * the clock needlessly does no real harm, except use up a
1318 	 * bit of power potentially.
1319 	 *
1320 	 * We'll limit this to IVB with 3 pipes, since it has only two
1321 	 * DPLLs and so DPLL sharing is the only way to get three pipes
1322 	 * driving PCH ports at the same time. On SNB we could do this,
1323 	 * and potentially avoid enabling the second DPLL, but it's not
1324 	 * clear if it's a win or a loss power-wise. No point in doing
1325 	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
1326 	 */
1327 	if (INTEL_NUM_PIPES(display) == 3 &&
1328 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1329 		dpll |= DPLL_SDVO_HIGH_SPEED;
1330 
1331 	/* compute bitmask from p1 value */
1332 	dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
1333 	/* also FPA1 */
1334 	dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
1335 
1336 	switch (clock->p2) {
1337 	case 5:
1338 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
1339 		break;
1340 	case 7:
1341 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
1342 		break;
1343 	case 10:
1344 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
1345 		break;
1346 	case 14:
1347 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
1348 		break;
1349 	}
1350 	WARN_ON(reduced_clock->p2 != clock->p2);
1351 
1352 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
1353 	    intel_panel_use_ssc(display))
1354 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
1355 	else
1356 		dpll |= PLL_REF_INPUT_DREFCLK;
1357 
1358 	return dpll;
1359 }
1360 
1361 static void ilk_compute_dpll(struct intel_crtc_state *crtc_state,
1362 			     const struct dpll *clock,
1363 			     const struct dpll *reduced_clock)
1364 {
1365 	struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
1366 	int factor = ilk_fb_cb_factor(crtc_state);
1367 
1368 	hw_state->fp0 = ilk_dpll_compute_fp(clock, factor);
1369 	hw_state->fp1 = ilk_dpll_compute_fp(reduced_clock, factor);
1370 
1371 	hw_state->dpll = ilk_dpll(crtc_state, clock, reduced_clock);
1372 }
1373 
1374 static int ilk_crtc_compute_clock(struct intel_atomic_state *state,
1375 				  struct intel_crtc *crtc)
1376 {
1377 	struct intel_display *display = to_intel_display(state);
1378 	struct intel_crtc_state *crtc_state =
1379 		intel_atomic_get_new_crtc_state(state, crtc);
1380 	const struct intel_limit *limit;
1381 	int refclk = 120000;
1382 	int ret;
1383 
1384 	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
1385 	if (!crtc_state->has_pch_encoder)
1386 		return 0;
1387 
1388 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1389 		if (intel_panel_use_ssc(display)) {
1390 			drm_dbg_kms(display->drm,
1391 				    "using SSC reference clock of %d kHz\n",
1392 				    display->vbt.lvds_ssc_freq);
1393 			refclk = display->vbt.lvds_ssc_freq;
1394 		}
1395 
1396 		if (intel_is_dual_link_lvds(display)) {
1397 			if (refclk == 100000)
1398 				limit = &ilk_limits_dual_lvds_100m;
1399 			else
1400 				limit = &ilk_limits_dual_lvds;
1401 		} else {
1402 			if (refclk == 100000)
1403 				limit = &ilk_limits_single_lvds_100m;
1404 			else
1405 				limit = &ilk_limits_single_lvds;
1406 		}
1407 	} else {
1408 		limit = &ilk_limits_dac;
1409 	}
1410 
1411 	if (!crtc_state->clock_set &&
1412 	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1413 				refclk, NULL, &crtc_state->dpll))
1414 		return -EINVAL;
1415 
1416 	i9xx_calc_dpll_params(refclk, &crtc_state->dpll);
1417 
1418 	ilk_compute_dpll(crtc_state, &crtc_state->dpll,
1419 			 &crtc_state->dpll);
1420 
1421 	ret = intel_dpll_compute(state, crtc, NULL);
1422 	if (ret)
1423 		return ret;
1424 
1425 	crtc_state->port_clock = crtc_state->dpll.dot;
1426 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1427 
1428 	return ret;
1429 }
1430 
1431 static int ilk_crtc_get_dpll(struct intel_atomic_state *state,
1432 			     struct intel_crtc *crtc)
1433 {
1434 	struct intel_crtc_state *crtc_state =
1435 		intel_atomic_get_new_crtc_state(state, crtc);
1436 
1437 	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
1438 	if (!crtc_state->has_pch_encoder)
1439 		return 0;
1440 
1441 	return intel_dpll_reserve(state, crtc, NULL);
1442 }
1443 
1444 static u32 vlv_dpll(const struct intel_crtc_state *crtc_state)
1445 {
1446 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1447 	u32 dpll;
1448 
1449 	dpll = DPLL_INTEGRATED_REF_CLK_VLV |
1450 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1451 
1452 	if (crtc->pipe != PIPE_A)
1453 		dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
1454 
1455 	/* DPLL not used with DSI, but still need the rest set up */
1456 	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1457 		dpll |= DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV;
1458 
1459 	return dpll;
1460 }
1461 
1462 void vlv_compute_dpll(struct intel_crtc_state *crtc_state)
1463 {
1464 	struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
1465 
1466 	hw_state->dpll = vlv_dpll(crtc_state);
1467 	hw_state->dpll_md = i965_dpll_md(crtc_state);
1468 }
1469 
1470 static u32 chv_dpll(const struct intel_crtc_state *crtc_state)
1471 {
1472 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1473 	u32 dpll;
1474 
1475 	dpll = DPLL_SSC_REF_CLK_CHV |
1476 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1477 
1478 	if (crtc->pipe != PIPE_A)
1479 		dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
1480 
1481 	/* DPLL not used with DSI, but still need the rest set up */
1482 	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1483 		dpll |= DPLL_VCO_ENABLE;
1484 
1485 	return dpll;
1486 }
1487 
1488 void chv_compute_dpll(struct intel_crtc_state *crtc_state)
1489 {
1490 	struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
1491 
1492 	hw_state->dpll = chv_dpll(crtc_state);
1493 	hw_state->dpll_md = i965_dpll_md(crtc_state);
1494 }
1495 
1496 static int chv_crtc_compute_clock(struct intel_atomic_state *state,
1497 				  struct intel_crtc *crtc)
1498 {
1499 	struct intel_crtc_state *crtc_state =
1500 		intel_atomic_get_new_crtc_state(state, crtc);
1501 	const struct intel_limit *limit = &intel_limits_chv;
1502 	int refclk = 100000;
1503 
1504 	if (!crtc_state->clock_set &&
1505 	    !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1506 				refclk, NULL, &crtc_state->dpll))
1507 		return -EINVAL;
1508 
1509 	chv_calc_dpll_params(refclk, &crtc_state->dpll);
1510 
1511 	chv_compute_dpll(crtc_state);
1512 
1513 	/* FIXME this is a mess */
1514 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1515 		return 0;
1516 
1517 	crtc_state->port_clock = crtc_state->dpll.dot;
1518 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1519 
1520 	return 0;
1521 }
1522 
1523 static int vlv_crtc_compute_clock(struct intel_atomic_state *state,
1524 				  struct intel_crtc *crtc)
1525 {
1526 	struct intel_crtc_state *crtc_state =
1527 		intel_atomic_get_new_crtc_state(state, crtc);
1528 	const struct intel_limit *limit = &intel_limits_vlv;
1529 	int refclk = 100000;
1530 
1531 	if (!crtc_state->clock_set &&
1532 	    !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1533 				refclk, NULL, &crtc_state->dpll))
1534 		return -EINVAL;
1535 
1536 	vlv_calc_dpll_params(refclk, &crtc_state->dpll);
1537 
1538 	vlv_compute_dpll(crtc_state);
1539 
1540 	/* FIXME this is a mess */
1541 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1542 		return 0;
1543 
1544 	crtc_state->port_clock = crtc_state->dpll.dot;
1545 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1546 
1547 	return 0;
1548 }
1549 
1550 static int g4x_crtc_compute_clock(struct intel_atomic_state *state,
1551 				  struct intel_crtc *crtc)
1552 {
1553 	struct intel_display *display = to_intel_display(state);
1554 	struct intel_crtc_state *crtc_state =
1555 		intel_atomic_get_new_crtc_state(state, crtc);
1556 	const struct intel_limit *limit;
1557 	int refclk = 96000;
1558 
1559 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1560 		if (intel_panel_use_ssc(display)) {
1561 			refclk = display->vbt.lvds_ssc_freq;
1562 			drm_dbg_kms(display->drm,
1563 				    "using SSC reference clock of %d kHz\n",
1564 				    refclk);
1565 		}
1566 
1567 		if (intel_is_dual_link_lvds(display))
1568 			limit = &intel_limits_g4x_dual_channel_lvds;
1569 		else
1570 			limit = &intel_limits_g4x_single_channel_lvds;
1571 	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
1572 		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
1573 		limit = &intel_limits_g4x_hdmi;
1574 	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
1575 		limit = &intel_limits_g4x_sdvo;
1576 	} else {
1577 		/* The i9xx SDVO limits cover all other outputs */
1578 		limit = &intel_limits_i9xx_sdvo;
1579 	}
1580 
1581 	if (!crtc_state->clock_set &&
1582 	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1583 				refclk, NULL, &crtc_state->dpll))
1584 		return -EINVAL;
1585 
1586 	i9xx_calc_dpll_params(refclk, &crtc_state->dpll);
1587 
1588 	i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
1589 			  &crtc_state->dpll);
1590 
1591 	crtc_state->port_clock = crtc_state->dpll.dot;
1592 	/* FIXME this is a mess */
1593 	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_TVOUT))
1594 		crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1595 
1596 	return 0;
1597 }
1598 
1599 static int pnv_crtc_compute_clock(struct intel_atomic_state *state,
1600 				  struct intel_crtc *crtc)
1601 {
1602 	struct intel_display *display = to_intel_display(state);
1603 	struct intel_crtc_state *crtc_state =
1604 		intel_atomic_get_new_crtc_state(state, crtc);
1605 	const struct intel_limit *limit;
1606 	int refclk = 96000;
1607 
1608 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1609 		if (intel_panel_use_ssc(display)) {
1610 			refclk = display->vbt.lvds_ssc_freq;
1611 			drm_dbg_kms(display->drm,
1612 				    "using SSC reference clock of %d kHz\n",
1613 				    refclk);
1614 		}
1615 
1616 		limit = &pnv_limits_lvds;
1617 	} else {
1618 		limit = &pnv_limits_sdvo;
1619 	}
1620 
1621 	if (!crtc_state->clock_set &&
1622 	    !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1623 				refclk, NULL, &crtc_state->dpll))
1624 		return -EINVAL;
1625 
1626 	pnv_calc_dpll_params(refclk, &crtc_state->dpll);
1627 
1628 	i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
1629 			  &crtc_state->dpll);
1630 
1631 	crtc_state->port_clock = crtc_state->dpll.dot;
1632 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1633 
1634 	return 0;
1635 }
1636 
1637 static int i9xx_crtc_compute_clock(struct intel_atomic_state *state,
1638 				   struct intel_crtc *crtc)
1639 {
1640 	struct intel_display *display = to_intel_display(state);
1641 	struct intel_crtc_state *crtc_state =
1642 		intel_atomic_get_new_crtc_state(state, crtc);
1643 	const struct intel_limit *limit;
1644 	int refclk = 96000;
1645 
1646 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1647 		if (intel_panel_use_ssc(display)) {
1648 			refclk = display->vbt.lvds_ssc_freq;
1649 			drm_dbg_kms(display->drm,
1650 				    "using SSC reference clock of %d kHz\n",
1651 				    refclk);
1652 		}
1653 
1654 		limit = &intel_limits_i9xx_lvds;
1655 	} else {
1656 		limit = &intel_limits_i9xx_sdvo;
1657 	}
1658 
1659 	if (!crtc_state->clock_set &&
1660 	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1661 				 refclk, NULL, &crtc_state->dpll))
1662 		return -EINVAL;
1663 
1664 	i9xx_calc_dpll_params(refclk, &crtc_state->dpll);
1665 
1666 	i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
1667 			  &crtc_state->dpll);
1668 
1669 	crtc_state->port_clock = crtc_state->dpll.dot;
1670 	/* FIXME this is a mess */
1671 	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_TVOUT))
1672 		crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1673 
1674 	return 0;
1675 }
1676 
1677 static int i8xx_crtc_compute_clock(struct intel_atomic_state *state,
1678 				   struct intel_crtc *crtc)
1679 {
1680 	struct intel_display *display = to_intel_display(state);
1681 	struct intel_crtc_state *crtc_state =
1682 		intel_atomic_get_new_crtc_state(state, crtc);
1683 	const struct intel_limit *limit;
1684 	int refclk = 48000;
1685 
1686 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1687 		if (intel_panel_use_ssc(display)) {
1688 			refclk = display->vbt.lvds_ssc_freq;
1689 			drm_dbg_kms(display->drm,
1690 				    "using SSC reference clock of %d kHz\n",
1691 				    refclk);
1692 		}
1693 
1694 		limit = &intel_limits_i8xx_lvds;
1695 	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
1696 		limit = &intel_limits_i8xx_dvo;
1697 	} else {
1698 		limit = &intel_limits_i8xx_dac;
1699 	}
1700 
1701 	if (!crtc_state->clock_set &&
1702 	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1703 				 refclk, NULL, &crtc_state->dpll))
1704 		return -EINVAL;
1705 
1706 	i9xx_calc_dpll_params(refclk, &crtc_state->dpll);
1707 
1708 	i8xx_compute_dpll(crtc_state, &crtc_state->dpll,
1709 			  &crtc_state->dpll);
1710 
1711 	crtc_state->port_clock = crtc_state->dpll.dot;
1712 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1713 
1714 	return 0;
1715 }
1716 
1717 static const struct intel_dpll_global_funcs xe3plpd_dpll_funcs = {
1718 	.crtc_compute_clock = xe3plpd_crtc_compute_clock,
1719 };
1720 
1721 static const struct intel_dpll_global_funcs mtl_dpll_funcs = {
1722 	.crtc_compute_clock = mtl_crtc_compute_clock,
1723 };
1724 
1725 static const struct intel_dpll_global_funcs dg2_dpll_funcs = {
1726 	.crtc_compute_clock = dg2_crtc_compute_clock,
1727 };
1728 
1729 static const struct intel_dpll_global_funcs hsw_dpll_funcs = {
1730 	.crtc_compute_clock = hsw_crtc_compute_clock,
1731 	.crtc_get_dpll = hsw_crtc_get_dpll,
1732 };
1733 
1734 static const struct intel_dpll_global_funcs ilk_dpll_funcs = {
1735 	.crtc_compute_clock = ilk_crtc_compute_clock,
1736 	.crtc_get_dpll = ilk_crtc_get_dpll,
1737 };
1738 
1739 static const struct intel_dpll_global_funcs chv_dpll_funcs = {
1740 	.crtc_compute_clock = chv_crtc_compute_clock,
1741 };
1742 
1743 static const struct intel_dpll_global_funcs vlv_dpll_funcs = {
1744 	.crtc_compute_clock = vlv_crtc_compute_clock,
1745 };
1746 
1747 static const struct intel_dpll_global_funcs g4x_dpll_funcs = {
1748 	.crtc_compute_clock = g4x_crtc_compute_clock,
1749 };
1750 
1751 static const struct intel_dpll_global_funcs pnv_dpll_funcs = {
1752 	.crtc_compute_clock = pnv_crtc_compute_clock,
1753 };
1754 
1755 static const struct intel_dpll_global_funcs i9xx_dpll_funcs = {
1756 	.crtc_compute_clock = i9xx_crtc_compute_clock,
1757 };
1758 
1759 static const struct intel_dpll_global_funcs i8xx_dpll_funcs = {
1760 	.crtc_compute_clock = i8xx_crtc_compute_clock,
1761 };
1762 
1763 int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
1764 				  struct intel_crtc *crtc)
1765 {
1766 	struct intel_display *display = to_intel_display(state);
1767 	struct intel_crtc_state *crtc_state =
1768 		intel_atomic_get_new_crtc_state(state, crtc);
1769 	int ret;
1770 
1771 	drm_WARN_ON(display->drm, !intel_crtc_needs_modeset(crtc_state));
1772 
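	/*
	 * Start from a clean PLL hw state; values left over from a previous
	 * modeset must not leak into this one.
	 */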
1773 	memset(&crtc_state->dpll_hw_state, 0,
1774 	       sizeof(crtc_state->dpll_hw_state));
1775 
1776 	if (!crtc_state->hw.enable)
1777 		return 0;
1778 
1779 	ret = display->funcs.dpll->crtc_compute_clock(state, crtc);
1780 	if (ret) {
1781 		drm_dbg_kms(display->drm, "[CRTC:%d:%s] Couldn't calculate DPLL settings\n",
1782 			    crtc->base.base.id, crtc->base.name);
1783 		return ret;
1784 	}
1785 
1786 	return 0;
1787 }
1788 
1789 int intel_dpll_crtc_get_dpll(struct intel_atomic_state *state,
1790 			     struct intel_crtc *crtc)
1791 {
1792 	struct intel_display *display = to_intel_display(state);
1793 	struct intel_crtc_state *crtc_state =
1794 		intel_atomic_get_new_crtc_state(state, crtc);
1795 	int ret;
1796 
1797 	drm_WARN_ON(display->drm, !intel_crtc_needs_modeset(crtc_state));
1798 	drm_WARN_ON(display->drm, !crtc_state->hw.enable && crtc_state->intel_dpll);
1799 
1800 	if (!crtc_state->hw.enable || crtc_state->intel_dpll)
1801 		return 0;
1802 
1803 	if (!display->funcs.dpll->crtc_get_dpll)
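	/* Nothing to do on platforms that don't implement the hook. */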
1804 		return 0;
1805 
1806 	ret = display->funcs.dpll->crtc_get_dpll(state, crtc);
1807 	if (ret) {
1808 		drm_dbg_kms(display->drm, "[CRTC:%d:%s] Couldn't get a shared DPLL\n",
1809 			    crtc->base.base.id, crtc->base.name);
1810 		return ret;
1811 	}
1812 
1813 	return 0;
1814 }
1815 
1816 void
1817 intel_dpll_init_clock_hook(struct intel_display *display)
1818 {
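	/* Newest platforms first; the first matching check wins. */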
1819 	if (HAS_LT_PHY(display))
1820 		display->funcs.dpll = &xe3plpd_dpll_funcs;
1821 	else if (DISPLAY_VER(display) >= 14)
1822 		display->funcs.dpll = &mtl_dpll_funcs;
1823 	else if (display->platform.dg2)
1824 		display->funcs.dpll = &dg2_dpll_funcs;
1825 	else if (DISPLAY_VER(display) >= 9 || HAS_DDI(display))
1826 		display->funcs.dpll = &hsw_dpll_funcs;
1827 	else if (HAS_PCH_SPLIT(display))
1828 		display->funcs.dpll = &ilk_dpll_funcs;
1829 	else if (display->platform.cherryview)
1830 		display->funcs.dpll = &chv_dpll_funcs;
1831 	else if (display->platform.valleyview)
1832 		display->funcs.dpll = &vlv_dpll_funcs;
1833 	else if (display->platform.g4x)
1834 		display->funcs.dpll = &g4x_dpll_funcs;
1835 	else if (display->platform.pineview)
1836 		display->funcs.dpll = &pnv_dpll_funcs;
1837 	else if (DISPLAY_VER(display) != 2)
1838 		display->funcs.dpll = &i9xx_dpll_funcs;
1839 	else
1840 		display->funcs.dpll = &i8xx_dpll_funcs;
1841 }
1842 
1843 static bool i9xx_has_pps(struct intel_display *display)
1844 {
1845 	if (display->platform.i830)
1846 		return false;
1847 
1848 	return display->platform.pineview || display->platform.mobile;
1849 }
1850 
1851 void i9xx_enable_pll(const struct intel_crtc_state *crtc_state)
1852 {
1853 	struct intel_display *display = to_intel_display(crtc_state);
1854 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1855 	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
1856 	enum pipe pipe = crtc->pipe;
1857 	int i;
1858 
1859 	assert_transcoder_disabled(display, crtc_state->cpu_transcoder);
1860 
1861 	/* The PLL is protected by the panel power sequencer, make sure we can write it */
1862 	if (i9xx_has_pps(display))
1863 		assert_pps_unlocked(display, pipe);
1864 
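	/* Program the N/M divisors (FP registers) before enabling the DPLL itself. */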
1865 	intel_de_write(display, FP0(pipe), hw_state->fp0);
1866 	intel_de_write(display, FP1(pipe), hw_state->fp1);
1867 
1868 	/*
1869 	 * Apparently we need to have VGA mode enabled prior to changing
1870 	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
1871 	 * dividers, even though the register value does change.
1872 	 */
1873 	intel_de_write(display, DPLL(display, pipe),
1874 		       hw_state->dpll & ~DPLL_VGA_MODE_DIS);
1875 	intel_de_write(display, DPLL(display, pipe), hw_state->dpll);
1876 
1877 	/* Wait for the clocks to stabilize. */
1878 	intel_de_posting_read(display, DPLL(display, pipe));
1879 	udelay(150);
1880 
1881 	if (DISPLAY_VER(display) >= 4) {
1882 		intel_de_write(display, DPLL_MD(display, pipe),
1883 			       hw_state->dpll_md);
1884 	} else {
1885 		/* The pixel multiplier can only be updated once the
1886 		 * DPLL is enabled and the clocks are stable.
1887 		 *
1888 		 * So write it again.
1889 		 */
1890 		intel_de_write(display, DPLL(display, pipe), hw_state->dpll);
1891 	}
1892 
1893 	/* We do this three times for luck */
1894 	for (i = 0; i < 3; i++) {
1895 		intel_de_write(display, DPLL(display, pipe), hw_state->dpll);
1896 		intel_de_posting_read(display, DPLL(display, pipe));
1897 		udelay(150); /* wait for warmup */
1898 	}
1899 }
1900 
1901 static void vlv_pllb_recal_opamp(struct intel_display *display,
1902 				 enum dpio_phy phy, enum dpio_channel ch)
1903 {
1904 	u32 tmp;
1905 
1906 	/*
1907 	 * The PLLB opamp always calibrates to the max value of 0x3f; force-enable
1908 	 * it and set it to a more reasonable value instead.
1909 	 */
1910 	tmp = vlv_dpio_read(display->drm, phy, VLV_PLL_DW17(ch));
1911 	tmp &= 0xffffff00;
1912 	tmp |= 0x00000030;
1913 	vlv_dpio_write(display->drm, phy, VLV_PLL_DW17(ch), tmp);
1914 
1915 	tmp = vlv_dpio_read(display->drm, phy, VLV_REF_DW11);
1916 	tmp &= 0x00ffffff;
1917 	tmp |= 0x8c000000;
1918 	vlv_dpio_write(display->drm, phy, VLV_REF_DW11, tmp);
1919 
1920 	tmp = vlv_dpio_read(display->drm, phy, VLV_PLL_DW17(ch));
1921 	tmp &= 0xffffff00;
1922 	vlv_dpio_write(display->drm, phy, VLV_PLL_DW17(ch), tmp);
1923 
1924 	tmp = vlv_dpio_read(display->drm, phy, VLV_REF_DW11);
1925 	tmp &= 0x00ffffff;
1926 	tmp |= 0xb0000000;
1927 	vlv_dpio_write(display->drm, phy, VLV_REF_DW11, tmp);
1928 }
1929 
1930 static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state)
1931 {
1932 	struct intel_display *display = to_intel_display(crtc_state);
1933 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1934 	const struct dpll *clock = &crtc_state->dpll;
1935 	enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
1936 	enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
1937 	enum pipe pipe = crtc->pipe;
1938 	u32 tmp, coreclk;
1939 
1940 	vlv_dpio_get(display->drm);
1941 
1942 	/* See the eDP/HDMI DPIO driver VBIOS notes document */
1943 
1944 	/* PLL B needs special handling */
1945 	if (pipe == PIPE_B)
1946 		vlv_pllb_recal_opamp(display, phy, ch);
1947 
1948 	/* Set up Tx target for periodic Rcomp update */
1949 	vlv_dpio_write(display->drm, phy, VLV_PCS_DW17_BCAST, 0x0100000f);
1950 
1951 	/* Disable target IRef on PLL */
1952 	tmp = vlv_dpio_read(display->drm, phy, VLV_PLL_DW16(ch));
1953 	tmp &= 0x00ffffff;
1954 	vlv_dpio_write(display->drm, phy, VLV_PLL_DW16(ch), tmp);
1955 
1956 	/* Disable fast lock */
1957 	vlv_dpio_write(display->drm, phy, VLV_CMN_DW0, 0x610);
1958 
1959 	/* Set idtafcrecal before PLL is enabled */
1960 	tmp = DPIO_M1_DIV(clock->m1) |
1961 		DPIO_M2_DIV(clock->m2) |
1962 		DPIO_P1_DIV(clock->p1) |
1963 		DPIO_P2_DIV(clock->p2) |
1964 		DPIO_N_DIV(clock->n) |
1965 		DPIO_K_DIV(1);
1966 
1967 	/*
1968 	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
1969 	 * but we don't support that).
1970 	 * Note: don't use the DAC post divider as it seems unstable.
1971 	 */
1972 	tmp |= DPIO_S1_DIV(DPIO_S1_DIV_HDMIDP);
1973 	vlv_dpio_write(display->drm, phy, VLV_PLL_DW3(ch), tmp);
1974 
1975 	tmp |= DPIO_ENABLE_CALIBRATION;
1976 	vlv_dpio_write(display->drm, phy, VLV_PLL_DW3(ch), tmp);
1977 
1978 	/* Set HBR and RBR LPF coefficients */
1979 	if (crtc_state->port_clock == 162000 ||
1980 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG) ||
1981 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1982 		vlv_dpio_write(display->drm, phy, VLV_PLL_DW18(ch), 0x009f0003);
1983 	else
1984 		vlv_dpio_write(display->drm, phy, VLV_PLL_DW18(ch), 0x00d0000f);
1985 
1986 	if (intel_crtc_has_dp_encoder(crtc_state)) {
1987 		/* Use SSC source */
1988 		if (pipe == PIPE_A)
1989 			vlv_dpio_write(display->drm, phy, VLV_PLL_DW5(ch), 0x0df40000);
1990 		else
1991 			vlv_dpio_write(display->drm, phy, VLV_PLL_DW5(ch), 0x0df70000);
1992 	} else { /* HDMI or VGA */
1993 		/* Use bend source */
1994 		if (pipe == PIPE_A)
1995 			vlv_dpio_write(display->drm, phy, VLV_PLL_DW5(ch), 0x0df70000);
1996 		else
1997 			vlv_dpio_write(display->drm, phy, VLV_PLL_DW5(ch), 0x0df40000);
1998 	}
1999 
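	/*
	 * Core clock control: keep bits 15:8 of the current value and apply
	 * the (undocumented) magic constants used here.
	 */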
2000 	coreclk = vlv_dpio_read(display->drm, phy, VLV_PLL_DW7(ch));
2001 	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
2002 	if (intel_crtc_has_dp_encoder(crtc_state))
2003 		coreclk |= 0x01000000;
2004 	vlv_dpio_write(display->drm, phy, VLV_PLL_DW7(ch), coreclk);
2005 
2006 	vlv_dpio_write(display->drm, phy, VLV_PLL_DW19(ch), 0x87871000);
2007 
2008 	vlv_dpio_put(display->drm);
2009 }
2010 
2011 static void _vlv_enable_pll(const struct intel_crtc_state *crtc_state)
2012 {
2013 	struct intel_display *display = to_intel_display(crtc_state);
2014 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2015 	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
2016 	enum pipe pipe = crtc->pipe;
2017 
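	/* Enable the VCO, then give the PLL time to settle before checking for lock. */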
2018 	intel_de_write(display, DPLL(display, pipe), hw_state->dpll);
2019 	intel_de_posting_read(display, DPLL(display, pipe));
2020 	udelay(150);
2021 
2022 	if (intel_de_wait_for_set(display, DPLL(display, pipe), DPLL_LOCK_VLV, 1))
2023 		drm_err(display->drm, "DPLL %d failed to lock\n", pipe);
2024 }
2025 
2026 void vlv_enable_pll(const struct intel_crtc_state *crtc_state)
2027 {
2028 	struct intel_display *display = to_intel_display(crtc_state);
2029 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2030 	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
2031 	enum pipe pipe = crtc->pipe;
2032 
2033 	assert_transcoder_disabled(display, crtc_state->cpu_transcoder);
2034 
2035 	/* The PLL is protected by the panel power sequencer, make sure we can write it */
2036 	assert_pps_unlocked(display, pipe);
2037 
2038 	/* Enable Refclk */
2039 	intel_de_write(display, DPLL(display, pipe),
2040 		       hw_state->dpll & ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
2041 
2042 	if (hw_state->dpll & DPLL_VCO_ENABLE) {
2043 		vlv_prepare_pll(crtc_state);
2044 		_vlv_enable_pll(crtc_state);
2045 	}
2046 
2047 	intel_de_write(display, DPLL_MD(display, pipe), hw_state->dpll_md);
2048 	intel_de_posting_read(display, DPLL_MD(display, pipe));
2049 }
2050 
2051 static void chv_prepare_pll(const struct intel_crtc_state *crtc_state)
2052 {
2053 	struct intel_display *display = to_intel_display(crtc_state);
2054 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2055 	const struct dpll *clock = &crtc_state->dpll;
2056 	enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
2057 	enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
2058 	u32 tmp, loopfilter, tribuf_calcntr;
2059 	u32 m2_frac;
2060 
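	/*
	 * CHV splits M2 into a 22 bit fractional part and an integer part
	 * above bit 22; they are programmed separately below.
	 */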
2061 	m2_frac = clock->m2 & 0x3fffff;
2062 
2063 	vlv_dpio_get(display->drm);
2064 
2065 	/* p1 and p2 divider */
2066 	vlv_dpio_write(display->drm, phy, CHV_CMN_DW13(ch),
2067 		       DPIO_CHV_S1_DIV(5) |
2068 		       DPIO_CHV_P1_DIV(clock->p1) |
2069 		       DPIO_CHV_P2_DIV(clock->p2) |
2070 		       DPIO_CHV_K_DIV(1));
2071 
2072 	/* Feedback post-divider - m2 */
2073 	vlv_dpio_write(display->drm, phy, CHV_PLL_DW0(ch),
2074 		       DPIO_CHV_M2_DIV(clock->m2 >> 22));
2075 
2076 	/* Feedback refclk divider - n and m1 */
2077 	vlv_dpio_write(display->drm, phy, CHV_PLL_DW1(ch),
2078 		       DPIO_CHV_M1_DIV(DPIO_CHV_M1_DIV_BY_2) |
2079 		       DPIO_CHV_N_DIV(1));
2080 
2081 	/* M2 fraction division */
2082 	vlv_dpio_write(display->drm, phy, CHV_PLL_DW2(ch),
2083 		       DPIO_CHV_M2_FRAC_DIV(m2_frac));
2084 
2085 	/* M2 fraction division enable */
2086 	tmp = vlv_dpio_read(display->drm, phy, CHV_PLL_DW3(ch));
2087 	tmp &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
2088 	tmp |= DPIO_CHV_FEEDFWD_GAIN(2);
2089 	if (m2_frac)
2090 		tmp |= DPIO_CHV_FRAC_DIV_EN;
2091 	vlv_dpio_write(display->drm, phy, CHV_PLL_DW3(ch), tmp);
2092 
2093 	/* Program digital lock detect threshold */
2094 	tmp = vlv_dpio_read(display->drm, phy, CHV_PLL_DW9(ch));
2095 	tmp &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
2096 		      DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
2097 	tmp |= DPIO_CHV_INT_LOCK_THRESHOLD(0x5);
2098 	if (!m2_frac)
2099 		tmp |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
2100 	vlv_dpio_write(display->drm, phy, CHV_PLL_DW9(ch), tmp);
2101 
2102 	/* Loop filter */
2103 	if (clock->vco == 5400000) {
2104 		loopfilter = DPIO_CHV_PROP_COEFF(0x3) |
2105 			DPIO_CHV_INT_COEFF(0x8) |
2106 			DPIO_CHV_GAIN_CTRL(0x1);
2107 		tribuf_calcntr = 0x9;
2108 	} else if (clock->vco <= 6200000) {
2109 		loopfilter = DPIO_CHV_PROP_COEFF(0x5) |
2110 			DPIO_CHV_INT_COEFF(0xB) |
2111 			DPIO_CHV_GAIN_CTRL(0x3);
2112 		tribuf_calcntr = 0x9;
2113 	} else if (clock->vco <= 6480000) {
2114 		loopfilter = DPIO_CHV_PROP_COEFF(0x4) |
2115 			DPIO_CHV_INT_COEFF(0x9) |
2116 			DPIO_CHV_GAIN_CTRL(0x3);
2117 		tribuf_calcntr = 0x8;
2118 	} else {
2119 		/* Not supported. Apply the same limits as in the max case */
2120 		loopfilter = DPIO_CHV_PROP_COEFF(0x4) |
2121 			DPIO_CHV_INT_COEFF(0x9) |
2122 			DPIO_CHV_GAIN_CTRL(0x3);
2123 		tribuf_calcntr = 0;
2124 	}
2125 	vlv_dpio_write(display->drm, phy, CHV_PLL_DW6(ch), loopfilter);
2126 
2127 	tmp = vlv_dpio_read(display->drm, phy, CHV_PLL_DW8(ch));
2128 	tmp &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
2129 	tmp |= DPIO_CHV_TDC_TARGET_CNT(tribuf_calcntr);
2130 	vlv_dpio_write(display->drm, phy, CHV_PLL_DW8(ch), tmp);
2131 
2132 	/* AFC Recal */
2133 	vlv_dpio_write(display->drm, phy, CHV_CMN_DW14(ch),
2134 		       vlv_dpio_read(display->drm, phy, CHV_CMN_DW14(ch)) |
2135 		       DPIO_AFC_RECAL);
2136 
2137 	vlv_dpio_put(display->drm);
2138 }
2139 
2140 static void _chv_enable_pll(const struct intel_crtc_state *crtc_state)
2141 {
2142 	struct intel_display *display = to_intel_display(crtc_state);
2143 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2144 	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
2145 	enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
2146 	enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
2147 	enum pipe pipe = crtc->pipe;
2148 	u32 tmp;
2149 
2150 	vlv_dpio_get(display->drm);
2151 
2152 	/* Re-enable the 10-bit clock to the display controller */
2153 	tmp = vlv_dpio_read(display->drm, phy, CHV_CMN_DW14(ch));
2154 	tmp |= DPIO_DCLKP_EN;
2155 	vlv_dpio_write(display->drm, phy, CHV_CMN_DW14(ch), tmp);
2156 
2157 	vlv_dpio_put(display->drm);
2158 
2159 	/*
2160 	 * Need to wait >100 ns between setting the dclkp clock enable bit and enabling the PLL.
2161 	 */
2162 	udelay(1);
2163 
2164 	/* Enable PLL */
2165 	intel_de_write(display, DPLL(display, pipe), hw_state->dpll);
2166 
2167 	/* Check PLL is locked */
2168 	if (intel_de_wait_for_set(display, DPLL(display, pipe), DPLL_LOCK_VLV, 1))
2169 		drm_err(display->drm, "PLL %d failed to lock\n", pipe);
2170 }
2171 
2172 void chv_enable_pll(const struct intel_crtc_state *crtc_state)
2173 {
2174 	struct intel_display *display = to_intel_display(crtc_state);
2175 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2176 	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
2177 	enum pipe pipe = crtc->pipe;
2178 
2179 	assert_transcoder_disabled(display, crtc_state->cpu_transcoder);
2180 
2181 	/* The PLL is protected by the panel power sequencer, make sure we can write it */
2182 	assert_pps_unlocked(display, pipe);
2183 
2184 	/* Enable Refclk and SSC */
2185 	intel_de_write(display, DPLL(display, pipe),
2186 		       hw_state->dpll & ~DPLL_VCO_ENABLE);
2187 
2188 	if (hw_state->dpll & DPLL_VCO_ENABLE) {
2189 		chv_prepare_pll(crtc_state);
2190 		_chv_enable_pll(crtc_state);
2191 	}
2192 
2193 	if (pipe != PIPE_A) {
2194 		/*
2195 		 * WaPixelRepeatModeFixForC0:chv
2196 		 *
2197 		 * DPLLCMD is AWOL. Use chicken bits to propagate
2198 		 * the value from DPLLBMD to either pipe B or C.
2199 		 */
2200 		intel_de_write(display, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
2201 		intel_de_write(display, DPLL_MD(display, PIPE_B),
2202 			       hw_state->dpll_md);
2203 		intel_de_write(display, CBR4_VLV, 0);
2204 		display->state.chv_dpll_md[pipe] = hw_state->dpll_md;
2205 
2206 		/*
2207 		 * DPLLB VGA mode also seems to cause problems.
2208 		 * We should always have it disabled.
2209 		 */
2210 		drm_WARN_ON(display->drm,
2211 			    (intel_de_read(display, DPLL(display, PIPE_B)) &
2212 			     DPLL_VGA_MODE_DIS) == 0);
2213 	} else {
2214 		intel_de_write(display, DPLL_MD(display, pipe),
2215 			       hw_state->dpll_md);
2216 		intel_de_posting_read(display, DPLL_MD(display, pipe));
2217 	}
2218 }
2219 
2220 /**
2221  * vlv_force_pll_on - forcibly enable just the PLL
2222  * @display: display device
2223  * @pipe: pipe PLL to enable
2224  * @dpll: PLL configuration
2225  *
2226  * Enable the PLL for @pipe using the supplied @dpll config. To be used
2227  * in cases where we need the PLL enabled even when @pipe is not going to
2228  * be enabled.
2229  */
2230 int vlv_force_pll_on(struct intel_display *display, enum pipe pipe,
2231 		     const struct dpll *dpll)
2232 {
2233 	struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
2234 	struct intel_crtc_state *crtc_state;
2235 
2236 	crtc_state = intel_crtc_state_alloc(crtc);
2237 	if (!crtc_state)
2238 		return -ENOMEM;
2239 
2240 	crtc_state->cpu_transcoder = (enum transcoder)pipe;
2241 	crtc_state->pixel_multiplier = 1;
2242 	crtc_state->dpll = *dpll;
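	/*
	 * Claim an eDP output so the DP paths in the DPLL setup code are
	 * taken; presumably this matches the eventual user of the forced PLL.
	 */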
2243 	crtc_state->output_types = BIT(INTEL_OUTPUT_EDP);
2244 
2245 	if (display->platform.cherryview) {
2246 		chv_compute_dpll(crtc_state);
2247 		chv_enable_pll(crtc_state);
2248 	} else {
2249 		vlv_compute_dpll(crtc_state);
2250 		vlv_enable_pll(crtc_state);
2251 	}
2252 
2253 	intel_crtc_destroy_state(&crtc->base, &crtc_state->uapi);
2254 
2255 	return 0;
2256 }
2257 
2258 void vlv_disable_pll(struct intel_display *display, enum pipe pipe)
2259 {
2260 	u32 val;
2261 
2262 	/* Make sure the pipe isn't still relying on us */
2263 	assert_transcoder_disabled(display, (enum transcoder)pipe);
2264 
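	/*
	 * The VCO is shut off but the reference clock is left running; pipe B
	 * also keeps the integrated CRI clock enabled (it appears to be
	 * needed by the DPIO PHY).
	 */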
2265 	val = DPLL_INTEGRATED_REF_CLK_VLV |
2266 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
2267 	if (pipe != PIPE_A)
2268 		val |= DPLL_INTEGRATED_CRI_CLK_VLV;
2269 
2270 	intel_de_write(display, DPLL(display, pipe), val);
2271 	intel_de_posting_read(display, DPLL(display, pipe));
2272 }
2273 
2274 void chv_disable_pll(struct intel_display *display, enum pipe pipe)
2275 {
2276 	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
2277 	enum dpio_phy phy = vlv_pipe_to_phy(pipe);
2278 	u32 val;
2279 
2280 	/* Make sure the pipe isn't still relying on us */
2281 	assert_transcoder_disabled(display, (enum transcoder)pipe);
2282 
2283 	val = DPLL_SSC_REF_CLK_CHV |
2284 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
2285 	if (pipe != PIPE_A)
2286 		val |= DPLL_INTEGRATED_CRI_CLK_VLV;
2287 
2288 	intel_de_write(display, DPLL(display, pipe), val);
2289 	intel_de_posting_read(display, DPLL(display, pipe));
2290 
2291 	vlv_dpio_get(display->drm);
2292 
2293 	/* Disable the 10-bit clock to the display controller */
2294 	val = vlv_dpio_read(display->drm, phy, CHV_CMN_DW14(ch));
2295 	val &= ~DPIO_DCLKP_EN;
2296 	vlv_dpio_write(display->drm, phy, CHV_CMN_DW14(ch), val);
2297 
2298 	vlv_dpio_put(display->drm);
2299 }
2300 
2301 void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
2302 {
2303 	struct intel_display *display = to_intel_display(crtc_state);
2304 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2305 	enum pipe pipe = crtc->pipe;
2306 
2307 	/* i830 needs its DPLLs kept enabled at all times, so don't disable them here */
2308 	if (display->platform.i830)
2309 		return;
2310 
2311 	/* Make sure the pipe isn't still relying on us */
2312 	assert_transcoder_disabled(display, crtc_state->cpu_transcoder);
2313 
2314 	intel_de_write(display, DPLL(display, pipe), DPLL_VGA_MODE_DIS);
2315 	intel_de_posting_read(display, DPLL(display, pipe));
2316 }
2317 
2319 /**
2320  * vlv_force_pll_off - forcibly disable just the PLL
2321  * @display: display device
2322  * @pipe: pipe PLL to disable
2323  *
2324  * Disable the PLL for @pipe. Counterpart to vlv_force_pll_on(), for cases
2325  * where the PLL was enabled without @pipe itself being enabled.
2326  */
2327 void vlv_force_pll_off(struct intel_display *display, enum pipe pipe)
2328 {
2329 	if (display->platform.cherryview)
2330 		chv_disable_pll(display, pipe);
2331 	else
2332 		vlv_disable_pll(display, pipe);
2333 }
2334 
2335 /* Only for pre-ILK configs */
2336 static void assert_pll(struct intel_display *display,
2337 		       enum pipe pipe, bool state)
2338 {
2339 	bool cur_state;
2340 
2341 	cur_state = intel_de_read(display, DPLL(display, pipe)) & DPLL_VCO_ENABLE;
2342 	INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
2343 				 "PLL state assertion failure (expected %s, current %s)\n",
2344 				 str_on_off(state), str_on_off(cur_state));
2345 }
2346 
2347 void assert_pll_enabled(struct intel_display *display, enum pipe pipe)
2348 {
2349 	assert_pll(display, pipe, true);
2350 }
2351 
2352 void assert_pll_disabled(struct intel_display *display, enum pipe pipe)
2353 {
2354 	assert_pll(display, pipe, false);
2355 }
2356