xref: /linux/drivers/gpu/drm/i915/display/intel_dpll.c (revision ca220141fa8ebae09765a242076b2b77338106b0)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2020 Intel Corporation
4  */
5 
6 #include <linux/kernel.h>
7 #include <linux/string_helpers.h>
8 
9 #include <drm/drm_print.h>
10 
11 #include "intel_atomic.h"
12 #include "intel_crtc.h"
13 #include "intel_cx0_phy.h"
14 #include "intel_de.h"
15 #include "intel_display.h"
16 #include "intel_display_regs.h"
17 #include "intel_display_types.h"
18 #include "intel_dpio_phy.h"
19 #include "intel_dpll.h"
20 #include "intel_lt_phy.h"
21 #include "intel_lvds.h"
22 #include "intel_lvds_regs.h"
23 #include "intel_panel.h"
24 #include "intel_pps.h"
25 #include "intel_snps_phy.h"
26 #include "vlv_dpio_phy_regs.h"
27 #include "vlv_sideband.h"
28 
/* Per-platform hooks used to compute and acquire a DPLL for a CRTC. */
struct intel_dpll_global_funcs {
	/* Compute the DPLL dividers/state for @crtc as part of @state. */
	int (*crtc_compute_clock)(struct intel_atomic_state *state,
				  struct intel_crtc *crtc);
	/* Acquire the DPLL to be used by @crtc as part of @state. */
	int (*crtc_get_dpll)(struct intel_atomic_state *state,
			     struct intel_crtc *crtc);
};
35 
/*
 * Valid divider ranges for the legacy DPLL brute-force search: a candidate
 * set of dividers is accepted only if each value falls in its [min, max]
 * interval (see intel_pll_is_valid()).
 */
struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	/*
	 * p2 is selected rather than searched on most platforms:
	 * p2_slow below dot_limit, p2_fast at or above it
	 * (see i9xx_select_p2_div()).
	 */
	struct {
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};
/* gen2 (i8xx) DPLL limits, DAC output. */
static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

/* gen2 (i8xx) DPLL limits, DVO output (p2 fixed at 4). */
static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

/* gen2 (i8xx) DPLL limits, LVDS output (p2 = 14/7 for single/dual channel). */
static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

/* gen3/4 (i9xx) DPLL limits, SDVO/HDMI output. */
static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* gen3/4 (i9xx) DPLL limits, LVDS output. */
static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};
110 
111 
/* G4x DPLL limits, SDVO output (p2 fixed at 10). */
static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

/* G4x DPLL limits, HDMI output. */
static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* G4x DPLL limits, single-channel LVDS (p2 fixed at 14, dot_limit unused). */
static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

/* G4x DPLL limits, dual-channel LVDS (p2 fixed at 7, dot_limit unused). */
static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};
167 
/* Pineview DPLL limits, SDVO output. */
static const struct intel_limit pnv_limits_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* Pineview DPLL limits, LVDS output (p2 fixed at 14). */
static const struct intel_limit pnv_limits_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* m1 unused, combined m divider treated as m2 (see above). */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};
195 
/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const struct intel_limit ilk_limits_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* ILK/SNB single-channel LVDS limits (120 MHz refclk, p2 fixed at 14). */
static const struct intel_limit ilk_limits_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* ILK/SNB dual-channel LVDS limits (120 MHz refclk, p2 fixed at 7). */
static const struct intel_limit ilk_limits_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100mhz refclk limits. */
static const struct intel_limit ilk_limits_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Dual-channel variant of the 100 MHz refclk LVDS limits. */
static const struct intel_limit ilk_limits_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};
266 
static const struct intel_limit intel_limits_vlv = {
	 /*
	  * These are based on the data rate limits (measured in fast clocks)
	  * since those are the strictest limits we have. The fast
	  * clock and actual rate limits are more relaxed, so checking
	  * them would make no difference.
	  */
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const struct intel_limit intel_limits_chv = {
	/*
	 * These are based on the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have.  The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000, .max = 540000 },
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* m2 is stored in 10.22 fixed point format (see chv_calc_dpll_params()). */
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = {	.p2_slow = 1, .p2_fast = 14 },
};

/* Broxton/Geminilake reuse the CHV-style search (see bxt_find_best_dpll()). */
static const struct intel_limit intel_limits_bxt = {
	.dot = { .min = 25000, .max = 594000 },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};
309 
310 /*
311  * Platform specific helpers to calculate the port PLL loopback- (clock.m),
312  * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
313  * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
314  * The helpers' return value is the rate of the clock that is fed to the
315  * display engine's pipe which can be the above fast dot clock rate or a
316  * divided-down version of it.
317  */
318 /* m1 is reserved as 0 in Pineview, n is a ring counter */
319 static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
320 {
321 	clock->m = clock->m2 + 2;
322 	clock->p = clock->p1 * clock->p2;
323 
324 	clock->vco = clock->n == 0 ? 0 :
325 		DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
326 	clock->dot = clock->p == 0 ? 0 :
327 		DIV_ROUND_CLOSEST(clock->vco, clock->p);
328 
329 	return clock->dot;
330 }
331 
332 static u32 i9xx_dpll_compute_m(const struct dpll *dpll)
333 {
334 	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
335 }
336 
337 int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
338 {
339 	clock->m = i9xx_dpll_compute_m(clock);
340 	clock->p = clock->p1 * clock->p2;
341 
342 	clock->vco = clock->n + 2 == 0 ? 0 :
343 		DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
344 	clock->dot = clock->p == 0 ? 0 :
345 		DIV_ROUND_CLOSEST(clock->vco, clock->p);
346 
347 	return clock->dot;
348 }
349 
350 static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
351 {
352 	clock->m = clock->m1 * clock->m2;
353 	clock->p = clock->p1 * clock->p2 * 5;
354 
355 	clock->vco = clock->n == 0 ? 0 :
356 		DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
357 	clock->dot = clock->p == 0 ? 0 :
358 		DIV_ROUND_CLOSEST(clock->vco, clock->p);
359 
360 	return clock->dot;
361 }
362 
363 int chv_calc_dpll_params(int refclk, struct dpll *clock)
364 {
365 	clock->m = clock->m1 * clock->m2;
366 	clock->p = clock->p1 * clock->p2 * 5;
367 
368 	clock->vco = clock->n == 0 ? 0 :
369 		DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m), clock->n << 22);
370 	clock->dot = clock->p == 0 ? 0 :
371 		DIV_ROUND_CLOSEST(clock->vco, clock->p);
372 
373 	return clock->dot;
374 }
375 
376 static int i9xx_pll_refclk(const struct intel_crtc_state *crtc_state)
377 {
378 	struct intel_display *display = to_intel_display(crtc_state);
379 	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
380 
381 	if ((hw_state->dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
382 		return display->vbt.lvds_ssc_freq;
383 	else if (HAS_PCH_SPLIT(display))
384 		return 120000;
385 	else if (DISPLAY_VER(display) != 2)
386 		return 96000;
387 	else
388 		return 48000;
389 }
390 
/*
 * Read the current DPLL register state for @crtc into @dpll_hw_state.
 * Used for state readout/verification.
 */
void i9xx_dpll_get_hw_state(struct intel_crtc *crtc,
			    struct intel_dpll_hw_state *dpll_hw_state)
{
	struct intel_display *display = to_intel_display(crtc);
	struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;

	/* DPLL_MD only exists on gen4+. */
	if (DISPLAY_VER(display) >= 4) {
		u32 tmp;

		/* No way to read it out on pipes B and C */
		if (display->platform.cherryview && crtc->pipe != PIPE_A)
			tmp = display->state.chv_dpll_md[crtc->pipe];
		else
			tmp = intel_de_read(display,
					    DPLL_MD(display, crtc->pipe));

		hw_state->dpll_md = tmp;
	}

	hw_state->dpll = intel_de_read(display, DPLL(display, crtc->pipe));

	if (!display->platform.valleyview && !display->platform.cherryview) {
		/* Non-VLV/CHV platforms keep the dividers in the FP registers. */
		hw_state->fp0 = intel_de_read(display, FP0(crtc->pipe));
		hw_state->fp1 = intel_de_read(display, FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		hw_state->dpll &= ~(DPLL_LOCK_VLV |
				    DPLL_PORTC_READY_MASK |
				    DPLL_PORTB_READY_MASK);
	}
}
422 
/* Returns the clock of the currently programmed mode of the given pipe. */
void i9xx_crtc_clock_get(struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
	u32 dpll = hw_state->dpll;
	u32 fp;
	struct dpll clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(crtc_state);

	/* Pick whichever FP register the DPLL is currently using. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = hw_state->fp0;
	else
		fp = hw_state->fp1;

	/* Decode the divider fields from the FP register. */
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (display->platform.pineview) {
		/* PNV encodes N as a one-hot ring counter value. */
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (DISPLAY_VER(display) != 2) {
		/* p1 is stored as a one-hot bitmask in the DPLL register. */
		if (display->platform.pineview)
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		/* p2 depends on the DPLL operating mode. */
		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			drm_dbg_kms(display->drm,
				    "Unknown DPLL mode %08x in programmed "
				    "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (display->platform.pineview)
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		/* gen2: divider encoding differs, and i85x LVDS is special. */
		enum pipe lvds_pipe;

		if (display->platform.i85x &&
		    intel_lvds_port_enabled(display, LVDS, &lvds_pipe) &&
		    lvds_pipe == crtc->pipe) {
			u32 lvds = intel_de_read(display, LVDS);

			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			/* p2 is implied by the LVDS channel configuration. */
			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	crtc_state->port_clock = port_clock;
}
515 
/* Read back the programmed VLV PLL dividers and compute the port clock. */
void vlv_crtc_clock_get(struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
	enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
	int refclk = 100000; /* VLV PLL reference clock is 100 MHz */
	struct dpll clock;
	u32 tmp;

	/* In case of DSI, DPLL will not be used */
	if ((hw_state->dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* Dividers live in the DPIO (PHY) register space. */
	vlv_dpio_get(display->drm);
	tmp = vlv_dpio_read(display->drm, phy, VLV_PLL_DW3(ch));
	vlv_dpio_put(display->drm);

	clock.m1 = REG_FIELD_GET(DPIO_M1_DIV_MASK, tmp);
	clock.m2 = REG_FIELD_GET(DPIO_M2_DIV_MASK, tmp);
	clock.n = REG_FIELD_GET(DPIO_N_DIV_MASK, tmp);
	clock.p1 = REG_FIELD_GET(DPIO_P1_DIV_MASK, tmp);
	clock.p2 = REG_FIELD_GET(DPIO_P2_DIV_MASK, tmp);

	crtc_state->port_clock = vlv_calc_dpll_params(refclk, &clock);
}
543 
/* Read back the programmed CHV PLL dividers and compute the port clock. */
void chv_crtc_clock_get(struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
	enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
	struct dpll clock;
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
	int refclk = 100000; /* CHV PLL reference clock is 100 MHz */

	/* In case of DSI, DPLL will not be used */
	if ((hw_state->dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* Dividers are spread over several DPIO (PHY) registers. */
	vlv_dpio_get(display->drm);
	cmn_dw13 = vlv_dpio_read(display->drm, phy, CHV_CMN_DW13(ch));
	pll_dw0 = vlv_dpio_read(display->drm, phy, CHV_PLL_DW0(ch));
	pll_dw1 = vlv_dpio_read(display->drm, phy, CHV_PLL_DW1(ch));
	pll_dw2 = vlv_dpio_read(display->drm, phy, CHV_PLL_DW2(ch));
	pll_dw3 = vlv_dpio_read(display->drm, phy, CHV_PLL_DW3(ch));
	vlv_dpio_put(display->drm);

	clock.m1 = REG_FIELD_GET(DPIO_CHV_M1_DIV_MASK, pll_dw1) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
	/* m2 is 10.22 fixed point: integer part from DW0, fraction from DW2. */
	clock.m2 = REG_FIELD_GET(DPIO_CHV_M2_DIV_MASK, pll_dw0) << 22;
	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
		clock.m2 |= REG_FIELD_GET(DPIO_CHV_M2_FRAC_DIV_MASK, pll_dw2);
	clock.n = REG_FIELD_GET(DPIO_CHV_N_DIV_MASK, pll_dw1);
	clock.p1 = REG_FIELD_GET(DPIO_CHV_P1_DIV_MASK, cmn_dw13);
	clock.p2 = REG_FIELD_GET(DPIO_CHV_P2_DIV_MASK, cmn_dw13);

	crtc_state->port_clock = chv_calc_dpll_params(refclk, &clock);
}
577 
578 /*
579  * Returns whether the given set of divisors are valid for a given refclk with
580  * the given connectors.
581  */
582 static bool intel_pll_is_valid(struct intel_display *display,
583 			       const struct intel_limit *limit,
584 			       const struct dpll *clock)
585 {
586 	if (clock->n < limit->n.min || limit->n.max < clock->n)
587 		return false;
588 	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
589 		return false;
590 	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
591 		return false;
592 	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
593 		return false;
594 
595 	if (!display->platform.pineview &&
596 	    !display->platform.valleyview && !display->platform.cherryview &&
597 	    !display->platform.broxton && !display->platform.geminilake)
598 		if (clock->m1 <= clock->m2)
599 			return false;
600 
601 	if (!display->platform.valleyview && !display->platform.cherryview &&
602 	    !display->platform.broxton && !display->platform.geminilake) {
603 		if (clock->p < limit->p.min || limit->p.max < clock->p)
604 			return false;
605 		if (clock->m < limit->m.min || limit->m.max < clock->m)
606 			return false;
607 	}
608 
609 	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
610 		return false;
611 	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
612 	 * connector, etc., rather than just a single range.
613 	 */
614 	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
615 		return false;
616 
617 	return true;
618 }
619 
620 static int
621 i9xx_select_p2_div(const struct intel_limit *limit,
622 		   const struct intel_crtc_state *crtc_state,
623 		   int target)
624 {
625 	struct intel_display *display = to_intel_display(crtc_state);
626 
627 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
628 		/*
629 		 * For LVDS just rely on its current settings for dual-channel.
630 		 * We haven't figured out how to reliably set up different
631 		 * single/dual channel state, if we even can.
632 		 */
633 		if (intel_is_dual_link_lvds(display))
634 			return limit->p2.p2_fast;
635 		else
636 			return limit->p2.p2_slow;
637 	} else {
638 		if (target < limit->p2.dot_limit)
639 			return limit->p2.p2_slow;
640 		else
641 			return limit->p2.p2_fast;
642 	}
643 }
644 
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk,
		    const struct dpll *match_clock,
		    struct dpll *best_clock)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct dpll clock;
	/* Best absolute dot clock error so far; start at "worst possible". */
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	/* Exhaustive search over m1/m2/n/p1 within the platform limits. */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* i9xx PLLs require m2 < m1; larger m2 can't be valid. */
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_pll_is_valid(display,
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					/* Keep the candidate closest to target. */
					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}
702 
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk,
		   const struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct dpll clock;
	/* Best absolute dot clock error so far; start at "worst possible". */
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	/*
	 * Like i9xx_find_best_dpll() but without the m2 < m1 restriction,
	 * since Pineview has a single combined m divider (m1 is unused).
	 */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_pll_is_valid(display,
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					/* Keep the candidate closest to target. */
					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}
758 
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk,
		   const struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct dpll clock;
	int max_n;
	bool found = false;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_pll_is_valid(display,
								limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						/*
						 * Tighten the n bound: any later
						 * hit must not use a larger n.
						 */
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}
817 
/*
 * Check if the calculated PLL configuration is more optimal compared to the
 * best configuration and error found so far. Returns %true if it is; the
 * calculated error is returned in @error_ppm.
 */
static bool vlv_PLL_is_optimal(struct intel_display *display, int target_freq,
			       const struct dpll *calculated_clock,
			       const struct dpll *best_clock,
			       unsigned int best_error_ppm,
			       unsigned int *error_ppm)
{
	/*
	 * For CHV ignore the error and consider only the P value.
	 * Prefer a bigger P value based on HW requirements.
	 */
	if (display->platform.cherryview) {
		*error_ppm = 0;

		return calculated_clock->p > best_clock->p;
	}

	if (drm_WARN_ON_ONCE(display->drm, !target_freq))
		return false;

	/* Relative dot clock error in parts per million. */
	*error_ppm = div_u64(1000000ULL *
				abs(target_freq - calculated_clock->dot),
			     target_freq);
	/*
	 * Prefer a better P value over a better (smaller) error if the error
	 * is small. Ensure this preference for future configurations too by
	 * setting the error to 0.
	 */
	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
		*error_ppm = 0;

		return true;
	}

	return *error_ppm + 10 < best_error_ppm;
}
857 
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.
 */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk,
		   const struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct dpll clock;
	/* Best error so far, in ppm; start at "worst possible". */
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			/* p2 steps by 2 above 10, by 1 below. */
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2 * 5;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

					/* Solve for m2 given the other dividers. */
					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_pll_is_valid(display,
								limit,
								&clock))
						continue;

					if (!vlv_PLL_is_optimal(display, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}
914 
915 /*
916  * Returns a set of divisors for the desired target clock with the given
917  * refclk, or FALSE.
918  */
919 static bool
920 chv_find_best_dpll(const struct intel_limit *limit,
921 		   struct intel_crtc_state *crtc_state,
922 		   int target, int refclk,
923 		   const struct dpll *match_clock,
924 		   struct dpll *best_clock)
925 {
926 	struct intel_display *display = to_intel_display(crtc_state);
927 	unsigned int best_error_ppm;
928 	struct dpll clock;
929 	u64 m2;
930 	int found = false;
931 
932 	memset(best_clock, 0, sizeof(*best_clock));
933 	best_error_ppm = 1000000;
934 
935 	/*
936 	 * Based on hardware doc, the n always set to 1, and m1 always
937 	 * set to 2.  If requires to support 200Mhz refclk, we need to
938 	 * revisit this because n may not 1 anymore.
939 	 */
940 	clock.n = 1;
941 	clock.m1 = 2;
942 
943 	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
944 		for (clock.p2 = limit->p2.p2_fast;
945 				clock.p2 >= limit->p2.p2_slow;
946 				clock.p2 -= clock.p2 > 10 ? 2 : 1) {
947 			unsigned int error_ppm;
948 
949 			clock.p = clock.p1 * clock.p2 * 5;
950 
951 			m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
952 						   refclk * clock.m1);
953 
954 			if (m2 > INT_MAX/clock.m1)
955 				continue;
956 
957 			clock.m2 = m2;
958 
959 			chv_calc_dpll_params(refclk, &clock);
960 
961 			if (!intel_pll_is_valid(display, limit, &clock))
962 				continue;
963 
964 			if (!vlv_PLL_is_optimal(display, target, &clock, best_clock,
965 						best_error_ppm, &error_ppm))
966 				continue;
967 
968 			*best_clock = clock;
969 			best_error_ppm = error_ppm;
970 			found = true;
971 		}
972 	}
973 
974 	return found;
975 }
976 
977 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
978 			struct dpll *best_clock)
979 {
980 	const struct intel_limit *limit = &intel_limits_bxt;
981 	int refclk = 100000;
982 
983 	return chv_find_best_dpll(limit, crtc_state,
984 				  crtc_state->port_clock, refclk,
985 				  NULL, best_clock);
986 }
987 
988 u32 i9xx_dpll_compute_fp(const struct dpll *dpll)
989 {
990 	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
991 }
992 
993 static u32 pnv_dpll_compute_fp(const struct dpll *dpll)
994 {
995 	return (1 << dpll->n) << 16 | dpll->m2;
996 }
997 
998 static u32 i965_dpll_md(const struct intel_crtc_state *crtc_state)
999 {
1000 	return (crtc_state->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
1001 }
1002 
/*
 * Assemble the DPLL control register value for gen3/gen4 style
 * (i9xx/g4x/pnv) PLLs from the precomputed divisors.
 *
 * @crtc_state: provides output types, pixel multiplier, SSC usage, etc.
 * @clock: divisors for the normal clock
 * @reduced_clock: divisors for the reduced (downclocked) clock; must
 *	match @clock except for p1 on g4x (only g4x has a separate FPA1
 *	P1 field)
 *
 * Returns the DPLL register value; nothing is written to hardware here.
 */
static u32 i9xx_dpll(const struct intel_crtc_state *crtc_state,
		     const struct dpll *clock,
		     const struct dpll *reduced_clock)
{
	struct intel_display *display = to_intel_display(crtc_state);
	u32 dpll;

	dpll = DPLL_VCO_ENABLE | DPLL_VGA_MODE_DIS;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	/* Only these platforms have the SDVO hires multiplier field */
	if (display->platform.i945g || display->platform.i945gm ||
	    display->platform.g33 || display->platform.pineview) {
		dpll |= (crtc_state->pixel_multiplier - 1)
			<< SDVO_MULTIPLIER_SHIFT_HIRES;
	}

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	if (display->platform.g4x) {
		/* g4x alone has a separate FPA1 P1 field for the reduced clock */
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
	} else if (display->platform.pineview) {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
		WARN_ON(reduced_clock->p1 != clock->p1);
	} else {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		WARN_ON(reduced_clock->p1 != clock->p1);
	}

	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}
	/* No separate P2 field for the reduced clock */
	WARN_ON(reduced_clock->p2 != clock->p2);

	if (DISPLAY_VER(display) >= 4)
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);

	/* Reference clock selection */
	if (crtc_state->sdvo_tv_clock)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(display))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	return dpll;
}
1071 
1072 static void i9xx_compute_dpll(struct intel_crtc_state *crtc_state,
1073 			      const struct dpll *clock,
1074 			      const struct dpll *reduced_clock)
1075 {
1076 	struct intel_display *display = to_intel_display(crtc_state);
1077 	struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
1078 
1079 	if (display->platform.pineview) {
1080 		hw_state->fp0 = pnv_dpll_compute_fp(clock);
1081 		hw_state->fp1 = pnv_dpll_compute_fp(reduced_clock);
1082 	} else {
1083 		hw_state->fp0 = i9xx_dpll_compute_fp(clock);
1084 		hw_state->fp1 = i9xx_dpll_compute_fp(reduced_clock);
1085 	}
1086 
1087 	hw_state->dpll = i9xx_dpll(crtc_state, clock, reduced_clock);
1088 
1089 	if (DISPLAY_VER(display) >= 4)
1090 		hw_state->dpll_md = i965_dpll_md(crtc_state);
1091 }
1092 
/*
 * Assemble the DPLL control register value for gen2 (i8xx) PLLs
 * from the precomputed divisors. Nothing is written to hardware here.
 */
static u32 i8xx_dpll(const struct intel_crtc_state *crtc_state,
		     const struct dpll *clock,
		     const struct dpll *reduced_clock)
{
	struct intel_display *display = to_intel_display(crtc_state);
	u32 dpll;

	dpll = DPLL_VCO_ENABLE | DPLL_VGA_MODE_DIS;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		/* LVDS uses a one-hot P1 encoding */
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	} else {
		if (clock->p1 == 2)
			dpll |= PLL_P1_DIVIDE_BY_TWO;
		else
			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (clock->p2 == 4)
			dpll |= PLL_P2_DIVIDE_BY_4;
	}
	/* gen2 has no separate dividers for the reduced clock */
	WARN_ON(reduced_clock->p1 != clock->p1);
	WARN_ON(reduced_clock->p2 != clock->p2);

	/*
	 * Bspec:
	 * "[Almador Errata]: For the correct operation of the muxed DVO pins
	 *  (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
	 *  GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
	 *  Enable) must be set to “1” in both the DPLL A Control Register
	 *  (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
	 *
	 * For simplicity we simply keep both bits always enabled in
	 * both DPLLS. The spec says we should disable the DVO 2X clock
	 * when not needed, but this seems to work fine in practice.
	 */
	if (display->platform.i830 ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
		dpll |= DPLL_DVO_2X_MODE;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(display))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	return dpll;
}
1139 
1140 static void i8xx_compute_dpll(struct intel_crtc_state *crtc_state,
1141 			      const struct dpll *clock,
1142 			      const struct dpll *reduced_clock)
1143 {
1144 	struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
1145 
1146 	hw_state->fp0 = i9xx_dpll_compute_fp(clock);
1147 	hw_state->fp1 = i9xx_dpll_compute_fp(reduced_clock);
1148 
1149 	hw_state->dpll = i8xx_dpll(crtc_state, clock, reduced_clock);
1150 }
1151 
1152 static int hsw_crtc_compute_clock(struct intel_atomic_state *state,
1153 				  struct intel_crtc *crtc)
1154 {
1155 	struct intel_display *display = to_intel_display(state);
1156 	struct intel_crtc_state *crtc_state =
1157 		intel_atomic_get_new_crtc_state(state, crtc);
1158 	struct intel_encoder *encoder =
1159 		intel_get_crtc_new_encoder(state, crtc_state);
1160 	int ret;
1161 
1162 	if (DISPLAY_VER(display) < 11 &&
1163 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1164 		return 0;
1165 
1166 	ret = intel_dpll_compute(state, crtc, encoder);
1167 	if (ret)
1168 		return ret;
1169 
1170 	/* FIXME this is a mess */
1171 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1172 		return 0;
1173 
1174 	/* CRT dotclock is determined via other means */
1175 	if (!crtc_state->has_pch_encoder)
1176 		crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1177 
1178 	return 0;
1179 }
1180 
1181 static int hsw_crtc_get_dpll(struct intel_atomic_state *state,
1182 			     struct intel_crtc *crtc)
1183 {
1184 	struct intel_display *display = to_intel_display(state);
1185 	struct intel_crtc_state *crtc_state =
1186 		intel_atomic_get_new_crtc_state(state, crtc);
1187 	struct intel_encoder *encoder =
1188 		intel_get_crtc_new_encoder(state, crtc_state);
1189 
1190 	if (DISPLAY_VER(display) < 11 &&
1191 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1192 		return 0;
1193 
1194 	return intel_dpll_reserve(state, crtc, encoder);
1195 }
1196 
1197 static int dg2_crtc_compute_clock(struct intel_atomic_state *state,
1198 				  struct intel_crtc *crtc)
1199 {
1200 	struct intel_crtc_state *crtc_state =
1201 		intel_atomic_get_new_crtc_state(state, crtc);
1202 	struct intel_encoder *encoder =
1203 		intel_get_crtc_new_encoder(state, crtc_state);
1204 	int ret;
1205 
1206 	ret = intel_mpllb_calc_state(crtc_state, encoder);
1207 	if (ret)
1208 		return ret;
1209 
1210 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1211 
1212 	return 0;
1213 }
1214 
1215 static int xe3plpd_crtc_compute_clock(struct intel_atomic_state *state,
1216 				      struct intel_crtc *crtc)
1217 {
1218 	struct intel_crtc_state *crtc_state =
1219 		intel_atomic_get_new_crtc_state(state, crtc);
1220 	struct intel_encoder *encoder =
1221 		intel_get_crtc_new_encoder(state, crtc_state);
1222 	int ret;
1223 
1224 	ret = intel_lt_phy_pll_calc_state(crtc_state, encoder);
1225 	if (ret)
1226 		return ret;
1227 
1228 	/* TODO: Do the readback via intel_compute_shared_dplls() */
1229 	crtc_state->port_clock =
1230 			intel_lt_phy_calc_port_clock(encoder, crtc_state);
1231 
1232 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1233 
1234 	return 0;
1235 }
1236 
1237 static int ilk_fb_cb_factor(const struct intel_crtc_state *crtc_state)
1238 {
1239 	struct intel_display *display = to_intel_display(crtc_state);
1240 
1241 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
1242 	    ((intel_panel_use_ssc(display) && display->vbt.lvds_ssc_freq == 100000) ||
1243 	     (HAS_PCH_IBX(display) && intel_is_dual_link_lvds(display))))
1244 		return 25;
1245 
1246 	if (crtc_state->sdvo_tv_clock)
1247 		return 20;
1248 
1249 	return 21;
1250 }
1251 
1252 static bool ilk_needs_fb_cb_tune(const struct dpll *dpll, int factor)
1253 {
1254 	return dpll->m < factor * dpll->n;
1255 }
1256 
1257 static u32 ilk_dpll_compute_fp(const struct dpll *clock, int factor)
1258 {
1259 	u32 fp;
1260 
1261 	fp = i9xx_dpll_compute_fp(clock);
1262 	if (ilk_needs_fb_cb_tune(clock, factor))
1263 		fp |= FP_CB_TUNE;
1264 
1265 	return fp;
1266 }
1267 
/*
 * Assemble the PCH DPLL control register value for ILK/SNB/IVB
 * from the precomputed divisors. Nothing is written to hardware here.
 *
 * @reduced_clock must match @clock except for p1 (a separate FPA1
 * P1 field exists for the reduced clock).
 */
static u32 ilk_dpll(const struct intel_crtc_state *crtc_state,
		    const struct dpll *clock,
		    const struct dpll *reduced_clock)
{
	struct intel_display *display = to_intel_display(crtc_state);
	u32 dpll;

	dpll = DPLL_VCO_ENABLE;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	dpll |= (crtc_state->pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/*
	 * The high speed IO clock is only really required for
	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
	 * possible to share the DPLL between CRT and HDMI. Enabling
	 * the clock needlessly does no real harm, except use up a
	 * bit of power potentially.
	 *
	 * We'll limit this to IVB with 3 pipes, since it has only two
	 * DPLLs and so DPLL sharing is the only way to get three pipes
	 * driving PCH ports at the same time. On SNB we could do this,
	 * and potentially avoid enabling the second DPLL, but it's not
	 * clear if it's a win or loss power wise. No point in doing
	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
	 */
	if (INTEL_NUM_PIPES(display) == 3 &&
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}
	/* No separate P2 field for the reduced clock */
	WARN_ON(reduced_clock->p2 != clock->p2);

	/* Reference clock selection */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(display))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	return dpll;
}
1339 
1340 static void ilk_compute_dpll(struct intel_crtc_state *crtc_state,
1341 			     const struct dpll *clock,
1342 			     const struct dpll *reduced_clock)
1343 {
1344 	struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
1345 	int factor = ilk_fb_cb_factor(crtc_state);
1346 
1347 	hw_state->fp0 = ilk_dpll_compute_fp(clock, factor);
1348 	hw_state->fp1 = ilk_dpll_compute_fp(reduced_clock, factor);
1349 
1350 	hw_state->dpll = ilk_dpll(crtc_state, clock, reduced_clock);
1351 }
1352 
1353 static int ilk_crtc_compute_clock(struct intel_atomic_state *state,
1354 				  struct intel_crtc *crtc)
1355 {
1356 	struct intel_display *display = to_intel_display(state);
1357 	struct intel_crtc_state *crtc_state =
1358 		intel_atomic_get_new_crtc_state(state, crtc);
1359 	const struct intel_limit *limit;
1360 	int refclk = 120000;
1361 	int ret;
1362 
1363 	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
1364 	if (!crtc_state->has_pch_encoder)
1365 		return 0;
1366 
1367 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1368 		if (intel_panel_use_ssc(display)) {
1369 			drm_dbg_kms(display->drm,
1370 				    "using SSC reference clock of %d kHz\n",
1371 				    display->vbt.lvds_ssc_freq);
1372 			refclk = display->vbt.lvds_ssc_freq;
1373 		}
1374 
1375 		if (intel_is_dual_link_lvds(display)) {
1376 			if (refclk == 100000)
1377 				limit = &ilk_limits_dual_lvds_100m;
1378 			else
1379 				limit = &ilk_limits_dual_lvds;
1380 		} else {
1381 			if (refclk == 100000)
1382 				limit = &ilk_limits_single_lvds_100m;
1383 			else
1384 				limit = &ilk_limits_single_lvds;
1385 		}
1386 	} else {
1387 		limit = &ilk_limits_dac;
1388 	}
1389 
1390 	if (!crtc_state->clock_set &&
1391 	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1392 				refclk, NULL, &crtc_state->dpll))
1393 		return -EINVAL;
1394 
1395 	i9xx_calc_dpll_params(refclk, &crtc_state->dpll);
1396 
1397 	ilk_compute_dpll(crtc_state, &crtc_state->dpll,
1398 			 &crtc_state->dpll);
1399 
1400 	ret = intel_dpll_compute(state, crtc, NULL);
1401 	if (ret)
1402 		return ret;
1403 
1404 	crtc_state->port_clock = crtc_state->dpll.dot;
1405 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1406 
1407 	return ret;
1408 }
1409 
1410 static int ilk_crtc_get_dpll(struct intel_atomic_state *state,
1411 			     struct intel_crtc *crtc)
1412 {
1413 	struct intel_crtc_state *crtc_state =
1414 		intel_atomic_get_new_crtc_state(state, crtc);
1415 
1416 	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
1417 	if (!crtc_state->has_pch_encoder)
1418 		return 0;
1419 
1420 	return intel_dpll_reserve(state, crtc, NULL);
1421 }
1422 
1423 static u32 vlv_dpll(const struct intel_crtc_state *crtc_state)
1424 {
1425 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1426 	u32 dpll;
1427 
1428 	dpll = DPLL_INTEGRATED_REF_CLK_VLV |
1429 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1430 
1431 	if (crtc->pipe != PIPE_A)
1432 		dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
1433 
1434 	/* DPLL not used with DSI, but still need the rest set up */
1435 	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1436 		dpll |= DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV;
1437 
1438 	return dpll;
1439 }
1440 
1441 void vlv_compute_dpll(struct intel_crtc_state *crtc_state)
1442 {
1443 	struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
1444 
1445 	hw_state->dpll = vlv_dpll(crtc_state);
1446 	hw_state->dpll_md = i965_dpll_md(crtc_state);
1447 }
1448 
1449 static u32 chv_dpll(const struct intel_crtc_state *crtc_state)
1450 {
1451 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1452 	u32 dpll;
1453 
1454 	dpll = DPLL_SSC_REF_CLK_CHV |
1455 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1456 
1457 	if (crtc->pipe != PIPE_A)
1458 		dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
1459 
1460 	/* DPLL not used with DSI, but still need the rest set up */
1461 	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1462 		dpll |= DPLL_VCO_ENABLE;
1463 
1464 	return dpll;
1465 }
1466 
1467 void chv_compute_dpll(struct intel_crtc_state *crtc_state)
1468 {
1469 	struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
1470 
1471 	hw_state->dpll = chv_dpll(crtc_state);
1472 	hw_state->dpll_md = i965_dpll_md(crtc_state);
1473 }
1474 
1475 static int chv_crtc_compute_clock(struct intel_atomic_state *state,
1476 				  struct intel_crtc *crtc)
1477 {
1478 	struct intel_crtc_state *crtc_state =
1479 		intel_atomic_get_new_crtc_state(state, crtc);
1480 	const struct intel_limit *limit = &intel_limits_chv;
1481 	int refclk = 100000;
1482 
1483 	if (!crtc_state->clock_set &&
1484 	    !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1485 				refclk, NULL, &crtc_state->dpll))
1486 		return -EINVAL;
1487 
1488 	chv_calc_dpll_params(refclk, &crtc_state->dpll);
1489 
1490 	chv_compute_dpll(crtc_state);
1491 
1492 	/* FIXME this is a mess */
1493 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1494 		return 0;
1495 
1496 	crtc_state->port_clock = crtc_state->dpll.dot;
1497 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1498 
1499 	return 0;
1500 }
1501 
1502 static int vlv_crtc_compute_clock(struct intel_atomic_state *state,
1503 				  struct intel_crtc *crtc)
1504 {
1505 	struct intel_crtc_state *crtc_state =
1506 		intel_atomic_get_new_crtc_state(state, crtc);
1507 	const struct intel_limit *limit = &intel_limits_vlv;
1508 	int refclk = 100000;
1509 
1510 	if (!crtc_state->clock_set &&
1511 	    !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1512 				refclk, NULL, &crtc_state->dpll))
1513 		return -EINVAL;
1514 
1515 	vlv_calc_dpll_params(refclk, &crtc_state->dpll);
1516 
1517 	vlv_compute_dpll(crtc_state);
1518 
1519 	/* FIXME this is a mess */
1520 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1521 		return 0;
1522 
1523 	crtc_state->port_clock = crtc_state->dpll.dot;
1524 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1525 
1526 	return 0;
1527 }
1528 
1529 static int g4x_crtc_compute_clock(struct intel_atomic_state *state,
1530 				  struct intel_crtc *crtc)
1531 {
1532 	struct intel_display *display = to_intel_display(state);
1533 	struct intel_crtc_state *crtc_state =
1534 		intel_atomic_get_new_crtc_state(state, crtc);
1535 	const struct intel_limit *limit;
1536 	int refclk = 96000;
1537 
1538 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1539 		if (intel_panel_use_ssc(display)) {
1540 			refclk = display->vbt.lvds_ssc_freq;
1541 			drm_dbg_kms(display->drm,
1542 				    "using SSC reference clock of %d kHz\n",
1543 				    refclk);
1544 		}
1545 
1546 		if (intel_is_dual_link_lvds(display))
1547 			limit = &intel_limits_g4x_dual_channel_lvds;
1548 		else
1549 			limit = &intel_limits_g4x_single_channel_lvds;
1550 	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
1551 		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
1552 		limit = &intel_limits_g4x_hdmi;
1553 	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
1554 		limit = &intel_limits_g4x_sdvo;
1555 	} else {
1556 		/* The option is for other outputs */
1557 		limit = &intel_limits_i9xx_sdvo;
1558 	}
1559 
1560 	if (!crtc_state->clock_set &&
1561 	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1562 				refclk, NULL, &crtc_state->dpll))
1563 		return -EINVAL;
1564 
1565 	i9xx_calc_dpll_params(refclk, &crtc_state->dpll);
1566 
1567 	i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
1568 			  &crtc_state->dpll);
1569 
1570 	crtc_state->port_clock = crtc_state->dpll.dot;
1571 	/* FIXME this is a mess */
1572 	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_TVOUT))
1573 		crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1574 
1575 	return 0;
1576 }
1577 
1578 static int pnv_crtc_compute_clock(struct intel_atomic_state *state,
1579 				  struct intel_crtc *crtc)
1580 {
1581 	struct intel_display *display = to_intel_display(state);
1582 	struct intel_crtc_state *crtc_state =
1583 		intel_atomic_get_new_crtc_state(state, crtc);
1584 	const struct intel_limit *limit;
1585 	int refclk = 96000;
1586 
1587 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1588 		if (intel_panel_use_ssc(display)) {
1589 			refclk = display->vbt.lvds_ssc_freq;
1590 			drm_dbg_kms(display->drm,
1591 				    "using SSC reference clock of %d kHz\n",
1592 				    refclk);
1593 		}
1594 
1595 		limit = &pnv_limits_lvds;
1596 	} else {
1597 		limit = &pnv_limits_sdvo;
1598 	}
1599 
1600 	if (!crtc_state->clock_set &&
1601 	    !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1602 				refclk, NULL, &crtc_state->dpll))
1603 		return -EINVAL;
1604 
1605 	pnv_calc_dpll_params(refclk, &crtc_state->dpll);
1606 
1607 	i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
1608 			  &crtc_state->dpll);
1609 
1610 	crtc_state->port_clock = crtc_state->dpll.dot;
1611 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1612 
1613 	return 0;
1614 }
1615 
1616 static int i9xx_crtc_compute_clock(struct intel_atomic_state *state,
1617 				   struct intel_crtc *crtc)
1618 {
1619 	struct intel_display *display = to_intel_display(state);
1620 	struct intel_crtc_state *crtc_state =
1621 		intel_atomic_get_new_crtc_state(state, crtc);
1622 	const struct intel_limit *limit;
1623 	int refclk = 96000;
1624 
1625 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1626 		if (intel_panel_use_ssc(display)) {
1627 			refclk = display->vbt.lvds_ssc_freq;
1628 			drm_dbg_kms(display->drm,
1629 				    "using SSC reference clock of %d kHz\n",
1630 				    refclk);
1631 		}
1632 
1633 		limit = &intel_limits_i9xx_lvds;
1634 	} else {
1635 		limit = &intel_limits_i9xx_sdvo;
1636 	}
1637 
1638 	if (!crtc_state->clock_set &&
1639 	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1640 				 refclk, NULL, &crtc_state->dpll))
1641 		return -EINVAL;
1642 
1643 	i9xx_calc_dpll_params(refclk, &crtc_state->dpll);
1644 
1645 	i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
1646 			  &crtc_state->dpll);
1647 
1648 	crtc_state->port_clock = crtc_state->dpll.dot;
1649 	/* FIXME this is a mess */
1650 	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_TVOUT))
1651 		crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1652 
1653 	return 0;
1654 }
1655 
1656 static int i8xx_crtc_compute_clock(struct intel_atomic_state *state,
1657 				   struct intel_crtc *crtc)
1658 {
1659 	struct intel_display *display = to_intel_display(state);
1660 	struct intel_crtc_state *crtc_state =
1661 		intel_atomic_get_new_crtc_state(state, crtc);
1662 	const struct intel_limit *limit;
1663 	int refclk = 48000;
1664 
1665 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1666 		if (intel_panel_use_ssc(display)) {
1667 			refclk = display->vbt.lvds_ssc_freq;
1668 			drm_dbg_kms(display->drm,
1669 				    "using SSC reference clock of %d kHz\n",
1670 				    refclk);
1671 		}
1672 
1673 		limit = &intel_limits_i8xx_lvds;
1674 	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
1675 		limit = &intel_limits_i8xx_dvo;
1676 	} else {
1677 		limit = &intel_limits_i8xx_dac;
1678 	}
1679 
1680 	if (!crtc_state->clock_set &&
1681 	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1682 				 refclk, NULL, &crtc_state->dpll))
1683 		return -EINVAL;
1684 
1685 	i9xx_calc_dpll_params(refclk, &crtc_state->dpll);
1686 
1687 	i8xx_compute_dpll(crtc_state, &crtc_state->dpll,
1688 			  &crtc_state->dpll);
1689 
1690 	crtc_state->port_clock = crtc_state->dpll.dot;
1691 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1692 
1693 	return 0;
1694 }
1695 
/* Xe3 LPD+ (LT PHY): clock computed per-PHY, no shared DPLL reservation */
static const struct intel_dpll_global_funcs xe3plpd_dpll_funcs = {
	.crtc_compute_clock = xe3plpd_crtc_compute_clock,
};

/* MTL+ (display ver >= 14): reuses the HSW-style hooks */
static const struct intel_dpll_global_funcs mtl_dpll_funcs = {
	.crtc_compute_clock = hsw_crtc_compute_clock,
	.crtc_get_dpll = hsw_crtc_get_dpll,
};

/* DG2: SNPS MPLLB, no shared DPLL reservation */
static const struct intel_dpll_global_funcs dg2_dpll_funcs = {
	.crtc_compute_clock = dg2_crtc_compute_clock,
};

/* HSW through display ver 13 (DDI platforms) */
static const struct intel_dpll_global_funcs hsw_dpll_funcs = {
	.crtc_compute_clock = hsw_crtc_compute_clock,
	.crtc_get_dpll = hsw_crtc_get_dpll,
};

/* ILK/SNB/IVB (PCH split platforms) */
static const struct intel_dpll_global_funcs ilk_dpll_funcs = {
	.crtc_compute_clock = ilk_crtc_compute_clock,
	.crtc_get_dpll = ilk_crtc_get_dpll,
};

static const struct intel_dpll_global_funcs chv_dpll_funcs = {
	.crtc_compute_clock = chv_crtc_compute_clock,
};

static const struct intel_dpll_global_funcs vlv_dpll_funcs = {
	.crtc_compute_clock = vlv_crtc_compute_clock,
};

static const struct intel_dpll_global_funcs g4x_dpll_funcs = {
	.crtc_compute_clock = g4x_crtc_compute_clock,
};

static const struct intel_dpll_global_funcs pnv_dpll_funcs = {
	.crtc_compute_clock = pnv_crtc_compute_clock,
};

static const struct intel_dpll_global_funcs i9xx_dpll_funcs = {
	.crtc_compute_clock = i9xx_crtc_compute_clock,
};

static const struct intel_dpll_global_funcs i8xx_dpll_funcs = {
	.crtc_compute_clock = i8xx_crtc_compute_clock,
};
1742 
1743 int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
1744 				  struct intel_crtc *crtc)
1745 {
1746 	struct intel_display *display = to_intel_display(state);
1747 	struct intel_crtc_state *crtc_state =
1748 		intel_atomic_get_new_crtc_state(state, crtc);
1749 	int ret;
1750 
1751 	drm_WARN_ON(display->drm, !intel_crtc_needs_modeset(crtc_state));
1752 
1753 	memset(&crtc_state->dpll_hw_state, 0,
1754 	       sizeof(crtc_state->dpll_hw_state));
1755 
1756 	if (!crtc_state->hw.enable)
1757 		return 0;
1758 
1759 	ret = display->funcs.dpll->crtc_compute_clock(state, crtc);
1760 	if (ret) {
1761 		drm_dbg_kms(display->drm, "[CRTC:%d:%s] Couldn't calculate DPLL settings\n",
1762 			    crtc->base.base.id, crtc->base.name);
1763 		return ret;
1764 	}
1765 
1766 	return 0;
1767 }
1768 
1769 int intel_dpll_crtc_get_dpll(struct intel_atomic_state *state,
1770 			     struct intel_crtc *crtc)
1771 {
1772 	struct intel_display *display = to_intel_display(state);
1773 	struct intel_crtc_state *crtc_state =
1774 		intel_atomic_get_new_crtc_state(state, crtc);
1775 	int ret;
1776 
1777 	drm_WARN_ON(display->drm, !intel_crtc_needs_modeset(crtc_state));
1778 	drm_WARN_ON(display->drm, !crtc_state->hw.enable && crtc_state->intel_dpll);
1779 
1780 	if (!crtc_state->hw.enable || crtc_state->intel_dpll)
1781 		return 0;
1782 
1783 	if (!display->funcs.dpll->crtc_get_dpll)
1784 		return 0;
1785 
1786 	ret = display->funcs.dpll->crtc_get_dpll(state, crtc);
1787 	if (ret) {
1788 		drm_dbg_kms(display->drm, "[CRTC:%d:%s] Couldn't get a shared DPLL\n",
1789 			    crtc->base.base.id, crtc->base.name);
1790 		return ret;
1791 	}
1792 
1793 	return 0;
1794 }
1795 
/*
 * Select the platform-specific DPLL clock computation vtable.
 * The chain is ordered from newest to oldest platform; the first
 * matching check wins, so the ordering is load-bearing.
 */
void
intel_dpll_init_clock_hook(struct intel_display *display)
{
	if (HAS_LT_PHY(display))
		display->funcs.dpll = &xe3plpd_dpll_funcs;
	else if (DISPLAY_VER(display) >= 14)
		display->funcs.dpll = &mtl_dpll_funcs;
	else if (display->platform.dg2)
		display->funcs.dpll = &dg2_dpll_funcs;
	else if (DISPLAY_VER(display) >= 9 || HAS_DDI(display))
		display->funcs.dpll = &hsw_dpll_funcs;
	else if (HAS_PCH_SPLIT(display))
		display->funcs.dpll = &ilk_dpll_funcs;
	else if (display->platform.cherryview)
		display->funcs.dpll = &chv_dpll_funcs;
	else if (display->platform.valleyview)
		display->funcs.dpll = &vlv_dpll_funcs;
	else if (display->platform.g4x)
		display->funcs.dpll = &g4x_dpll_funcs;
	else if (display->platform.pineview)
		display->funcs.dpll = &pnv_dpll_funcs;
	else if (DISPLAY_VER(display) != 2)
		display->funcs.dpll = &i9xx_dpll_funcs;
	else
		display->funcs.dpll = &i8xx_dpll_funcs;
}
1822 
1823 static bool i9xx_has_pps(struct intel_display *display)
1824 {
1825 	if (display->platform.i830)
1826 		return false;
1827 
1828 	return display->platform.pineview || display->platform.mobile;
1829 }
1830 
/*
 * Enable and stabilize the gen2-gen4 style DPLL from the precomputed
 * hw state. The transcoder must still be disabled; on panel-protected
 * platforms the PPS must be unlocked so the PLL registers are writable.
 * The write/posting-read/delay sequence below is order-critical.
 */
void i9xx_enable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
	enum pipe pipe = crtc->pipe;
	int i;

	assert_transcoder_disabled(display, crtc_state->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	if (i9xx_has_pps(display))
		assert_pps_unlocked(display, pipe);

	/* Divisors must be programmed before enabling the DPLL */
	intel_de_write(display, FP0(pipe), hw_state->fp0);
	intel_de_write(display, FP1(pipe), hw_state->fp1);

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	intel_de_write(display, DPLL(display, pipe),
		       hw_state->dpll & ~DPLL_VGA_MODE_DIS);
	intel_de_write(display, DPLL(display, pipe), hw_state->dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(display, DPLL(display, pipe));
	udelay(150);

	if (DISPLAY_VER(display) >= 4) {
		/* gen4+ carries the pixel multiplier in DPLL_MD */
		intel_de_write(display, DPLL_MD(display, pipe),
			       hw_state->dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		intel_de_write(display, DPLL(display, pipe), hw_state->dpll);
	}

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		intel_de_write(display, DPLL(display, pipe), hw_state->dpll);
		intel_de_posting_read(display, DPLL(display, pipe));
		udelay(150); /* wait for warmup */
	}
}
1880 
/*
 * Recalibrate the PLL B opamp via DPIO before enabling the PLL.
 * The magic register/value sequence below comes from the VLV DPIO
 * programming notes; the write order is order-critical.
 */
static void vlv_pllb_recal_opamp(struct intel_display *display,
				 enum dpio_phy phy, enum dpio_channel ch)
{
	u32 tmp;

	/*
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
	 * and set it to a reasonable value instead.
	 */
	tmp = vlv_dpio_read(display->drm, phy, VLV_PLL_DW17(ch));
	tmp &= 0xffffff00;
	tmp |= 0x00000030;
	vlv_dpio_write(display->drm, phy, VLV_PLL_DW17(ch), tmp);

	tmp = vlv_dpio_read(display->drm, phy, VLV_REF_DW11);
	tmp &= 0x00ffffff;
	tmp |= 0x8c000000;
	vlv_dpio_write(display->drm, phy, VLV_REF_DW11, tmp);

	tmp = vlv_dpio_read(display->drm, phy, VLV_PLL_DW17(ch));
	tmp &= 0xffffff00;
	vlv_dpio_write(display->drm, phy, VLV_PLL_DW17(ch), tmp);

	tmp = vlv_dpio_read(display->drm, phy, VLV_REF_DW11);
	tmp &= 0x00ffffff;
	tmp |= 0xb0000000;
	vlv_dpio_write(display->drm, phy, VLV_REF_DW11, tmp);
}
1909 
/*
 * vlv_prepare_pll - program the VLV DPIO PHY for the pipe PLL
 * @crtc_state: state of the CRTC whose PLL is being configured
 *
 * Performs the sideband (DPIO) programming that must happen before the
 * PLL itself is enabled through the DPLL register: divider values from
 * @crtc_state->dpll, LPF coefficients, SSC/bend clock source selection,
 * etc. The magic constants follow the vbios notes doc referenced below;
 * NOTE(review): the sequence appears order-sensitive - do not reorder.
 */
static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	const struct dpll *clock = &crtc_state->dpll;
	enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
	enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
	enum pipe pipe = crtc->pipe;
	u32 tmp, coreclk;

	vlv_dpio_get(display->drm);

	/* See eDP HDMI DPIO driver vbios notes doc */

	/* PLL B needs special handling */
	if (pipe == PIPE_B)
		vlv_pllb_recal_opamp(display, phy, ch);

	/* Set up Tx target for periodic Rcomp update */
	vlv_dpio_write(display->drm, phy, VLV_PCS_DW17_BCAST, 0x0100000f);

	/* Disable target IRef on PLL */
	tmp = vlv_dpio_read(display->drm, phy, VLV_PLL_DW16(ch));
	tmp &= 0x00ffffff;
	vlv_dpio_write(display->drm, phy, VLV_PLL_DW16(ch), tmp);

	/* Disable fast lock */
	vlv_dpio_write(display->drm, phy, VLV_CMN_DW0, 0x610);

	/* Set idtafcrecal before PLL is enabled */
	/* Pack all divider values computed earlier into PLL_DW3 */
	tmp = DPIO_M1_DIV(clock->m1) |
		DPIO_M2_DIV(clock->m2) |
		DPIO_P1_DIV(clock->p1) |
		DPIO_P2_DIV(clock->p2) |
		DPIO_N_DIV(clock->n) |
		DPIO_K_DIV(1);

	/*
	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
	 * but we don't support that).
	 * Note: don't use the DAC post divider as it seems unstable.
	 */
	tmp |= DPIO_S1_DIV(DPIO_S1_DIV_HDMIDP);
	vlv_dpio_write(display->drm, phy, VLV_PLL_DW3(ch), tmp);

	/* Write the dividers a second time with the calibration enable bit set */
	tmp |= DPIO_ENABLE_CALIBRATION;
	vlv_dpio_write(display->drm, phy, VLV_PLL_DW3(ch), tmp);

	/* Set HBR and RBR LPF coefficients */
	if (crtc_state->port_clock == 162000 ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		vlv_dpio_write(display->drm, phy, VLV_PLL_DW18(ch), 0x009f0003);
	else
		vlv_dpio_write(display->drm, phy, VLV_PLL_DW18(ch), 0x00d0000f);

	if (intel_crtc_has_dp_encoder(crtc_state)) {
		/* Use SSC source */
		if (pipe == PIPE_A)
			vlv_dpio_write(display->drm, phy, VLV_PLL_DW5(ch), 0x0df40000);
		else
			vlv_dpio_write(display->drm, phy, VLV_PLL_DW5(ch), 0x0df70000);
	} else { /* HDMI or VGA */
		/* Use bend source */
		if (pipe == PIPE_A)
			vlv_dpio_write(display->drm, phy, VLV_PLL_DW5(ch), 0x0df70000);
		else
			vlv_dpio_write(display->drm, phy, VLV_PLL_DW5(ch), 0x0df40000);
	}

	/*
	 * Core clock config: keep bits 15:8, set the magic 0x01c00000 bits,
	 * and additionally set 0x01000000 for DP outputs
	 * (NOTE(review): exact bit meanings not visible here - see BSpec).
	 */
	coreclk = vlv_dpio_read(display->drm, phy, VLV_PLL_DW7(ch));
	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
	if (intel_crtc_has_dp_encoder(crtc_state))
		coreclk |= 0x01000000;
	vlv_dpio_write(display->drm, phy, VLV_PLL_DW7(ch), coreclk);

	vlv_dpio_write(display->drm, phy, VLV_PLL_DW19(ch), 0x87871000);

	vlv_dpio_put(display->drm);
}
1990 
/*
 * _vlv_enable_pll - write the DPLL enable value and wait for PLL lock
 * @crtc_state: state of the CRTC whose PLL is being enabled
 *
 * Caller must have done the DPIO side programming (vlv_prepare_pll())
 * beforehand.
 */
static void _vlv_enable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
	enum pipe pipe = crtc->pipe;

	/* Posting read + settle delay before polling the lock bit */
	intel_de_write(display, DPLL(display, pipe), hw_state->dpll);
	intel_de_posting_read(display, DPLL(display, pipe));
	udelay(150);

	/* PLL should lock within 1 ms; warn loudly if it doesn't */
	if (intel_de_wait_for_set_ms(display, DPLL(display, pipe), DPLL_LOCK_VLV, 1))
		drm_err(display->drm, "DPLL %d failed to lock\n", pipe);
}
2005 
/*
 * vlv_enable_pll - enable the DPLL for a VLV pipe
 * @crtc_state: state of the CRTC whose PLL is being enabled
 *
 * Enables the reference clock unconditionally, then, only if the
 * precomputed hw state actually wants the VCO running, performs the
 * DPIO programming and full enable/lock sequence, and finally writes
 * the DPLL_MD (pixel multiplier etc.) value.
 */
void vlv_enable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
	enum pipe pipe = crtc->pipe;

	assert_transcoder_disabled(display, crtc_state->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	assert_pps_unlocked(display, pipe);

	/* Enable Refclk: program the dpll value with VCO/ext buffer still off */
	intel_de_write(display, DPLL(display, pipe),
		       hw_state->dpll & ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));

	/* Run the full enable sequence only when the state wants the VCO on */
	if (hw_state->dpll & DPLL_VCO_ENABLE) {
		vlv_prepare_pll(crtc_state);
		_vlv_enable_pll(crtc_state);
	}

	intel_de_write(display, DPLL_MD(display, pipe), hw_state->dpll_md);
	intel_de_posting_read(display, DPLL_MD(display, pipe));
}
2030 
/*
 * chv_prepare_pll - program the CHV DPIO PHY for the pipe PLL
 * @crtc_state: state of the CRTC whose PLL is being configured
 *
 * Programs the dividers (p1/p2, m2 integer and fractional parts, n/m1),
 * lock detect threshold, loop filter and AFC recalibration via the DPIO
 * sideband, before the PLL itself is enabled.
 * NOTE(review): the sequence appears order-sensitive - do not reorder.
 */
static void chv_prepare_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	const struct dpll *clock = &crtc_state->dpll;
	enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
	enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
	u32 tmp, loopfilter, tribuf_calcntr;
	u32 m2_frac;

	/* Low 22 bits of m2 hold the fractional part (integer part is m2 >> 22) */
	m2_frac = clock->m2 & 0x3fffff;

	vlv_dpio_get(display->drm);

	/* p1 and p2 divider */
	vlv_dpio_write(display->drm, phy, CHV_CMN_DW13(ch),
		       DPIO_CHV_S1_DIV(5) |
		       DPIO_CHV_P1_DIV(clock->p1) |
		       DPIO_CHV_P2_DIV(clock->p2) |
		       DPIO_CHV_K_DIV(1));

	/* Feedback post-divider - m2 */
	vlv_dpio_write(display->drm, phy, CHV_PLL_DW0(ch),
		       DPIO_CHV_M2_DIV(clock->m2 >> 22));

	/* Feedback refclk divider - n and m1 */
	vlv_dpio_write(display->drm, phy, CHV_PLL_DW1(ch),
		       DPIO_CHV_M1_DIV(DPIO_CHV_M1_DIV_BY_2) |
		       DPIO_CHV_N_DIV(1));

	/* M2 fraction division */
	vlv_dpio_write(display->drm, phy, CHV_PLL_DW2(ch),
		       DPIO_CHV_M2_FRAC_DIV(m2_frac));

	/* M2 fraction division enable (only when there is a fractional part) */
	tmp = vlv_dpio_read(display->drm, phy, CHV_PLL_DW3(ch));
	tmp &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
	tmp |= DPIO_CHV_FEEDFWD_GAIN(2);
	if (m2_frac)
		tmp |= DPIO_CHV_FRAC_DIV_EN;
	vlv_dpio_write(display->drm, phy, CHV_PLL_DW3(ch), tmp);

	/* Program digital lock detect threshold (coarse select for integer-only m2) */
	tmp = vlv_dpio_read(display->drm, phy, CHV_PLL_DW9(ch));
	tmp &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
		      DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
	tmp |= DPIO_CHV_INT_LOCK_THRESHOLD(0x5);
	if (!m2_frac)
		tmp |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
	vlv_dpio_write(display->drm, phy, CHV_PLL_DW9(ch), tmp);

	/* Loop filter: coefficients/tribuf count selected by VCO frequency range */
	if (clock->vco == 5400000) {
		loopfilter = DPIO_CHV_PROP_COEFF(0x3) |
			DPIO_CHV_INT_COEFF(0x8) |
			DPIO_CHV_GAIN_CTRL(0x1);
		tribuf_calcntr = 0x9;
	} else if (clock->vco <= 6200000) {
		loopfilter = DPIO_CHV_PROP_COEFF(0x5) |
			DPIO_CHV_INT_COEFF(0xB) |
			DPIO_CHV_GAIN_CTRL(0x3);
		tribuf_calcntr = 0x9;
	} else if (clock->vco <= 6480000) {
		loopfilter = DPIO_CHV_PROP_COEFF(0x4) |
			DPIO_CHV_INT_COEFF(0x9) |
			DPIO_CHV_GAIN_CTRL(0x3);
		tribuf_calcntr = 0x8;
	} else {
		/* Not supported. Apply the same limits as in the max case */
		loopfilter = DPIO_CHV_PROP_COEFF(0x4) |
			DPIO_CHV_INT_COEFF(0x9) |
			DPIO_CHV_GAIN_CTRL(0x3);
		tribuf_calcntr = 0;
	}
	vlv_dpio_write(display->drm, phy, CHV_PLL_DW6(ch), loopfilter);

	tmp = vlv_dpio_read(display->drm, phy, CHV_PLL_DW8(ch));
	tmp &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
	tmp |= DPIO_CHV_TDC_TARGET_CNT(tribuf_calcntr);
	vlv_dpio_write(display->drm, phy, CHV_PLL_DW8(ch), tmp);

	/* AFC Recal */
	vlv_dpio_write(display->drm, phy, CHV_CMN_DW14(ch),
		       vlv_dpio_read(display->drm, phy, CHV_CMN_DW14(ch)) |
		       DPIO_AFC_RECAL);

	vlv_dpio_put(display->drm);
}
2119 
/*
 * _chv_enable_pll - enable the CHV PLL and wait for it to lock
 * @crtc_state: state of the CRTC whose PLL is being enabled
 *
 * Caller must have done the DPIO side programming (chv_prepare_pll())
 * beforehand.
 */
static void _chv_enable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
	enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
	enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
	enum pipe pipe = crtc->pipe;
	u32 tmp;

	vlv_dpio_get(display->drm);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(display->drm, phy, CHV_CMN_DW14(ch));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(display->drm, phy, CHV_CMN_DW14(ch), tmp);

	vlv_dpio_put(display->drm);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	intel_de_write(display, DPLL(display, pipe), hw_state->dpll);

	/* Check PLL is locked (1 ms timeout) */
	if (intel_de_wait_for_set_ms(display, DPLL(display, pipe), DPLL_LOCK_VLV, 1))
		drm_err(display->drm, "PLL %d failed to lock\n", pipe);
}
2151 
/*
 * chv_enable_pll - enable the DPLL for a CHV pipe
 * @crtc_state: state of the CRTC whose PLL is being enabled
 *
 * Enables refclk/SSC first, runs the full DPIO + PLL enable sequence
 * only when the precomputed state wants the VCO on, then programs
 * DPLL_MD - for pipes B/C this goes through the DPLLBMD chicken-bit
 * workaround described inline.
 */
void chv_enable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
	enum pipe pipe = crtc->pipe;

	assert_transcoder_disabled(display, crtc_state->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	assert_pps_unlocked(display, pipe);

	/* Enable Refclk and SSC */
	intel_de_write(display, DPLL(display, pipe),
		       hw_state->dpll & ~DPLL_VCO_ENABLE);

	/* Run the full enable sequence only when the state wants the VCO on */
	if (hw_state->dpll & DPLL_VCO_ENABLE) {
		chv_prepare_pll(crtc_state);
		_chv_enable_pll(crtc_state);
	}

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		intel_de_write(display, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
		intel_de_write(display, DPLL_MD(display, PIPE_B),
			       hw_state->dpll_md);
		intel_de_write(display, CBR4_VLV, 0);
		/* Cache the value since DPLL_MD can't be read back for B/C */
		display->state.chv_dpll_md[pipe] = hw_state->dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		drm_WARN_ON(display->drm,
			    (intel_de_read(display, DPLL(display, PIPE_B)) &
			     DPLL_VGA_MODE_DIS) == 0);
	} else {
		intel_de_write(display, DPLL_MD(display, pipe),
			       hw_state->dpll_md);
		intel_de_posting_read(display, DPLL_MD(display, pipe));
	}
}
2199 
2200 /**
2201  * vlv_force_pll_on - forcibly enable just the PLL
2202  * @display: display device
2203  * @pipe: pipe PLL to enable
2204  * @dpll: PLL configuration
2205  *
2206  * Enable the PLL for @pipe using the supplied @dpll config. To be used
2207  * in cases where we need the PLL enabled even when @pipe is not going to
2208  * be enabled.
2209  */
2210 int vlv_force_pll_on(struct intel_display *display, enum pipe pipe,
2211 		     const struct dpll *dpll)
2212 {
2213 	struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
2214 	struct intel_crtc_state *crtc_state;
2215 
2216 	crtc_state = intel_crtc_state_alloc(crtc);
2217 	if (!crtc_state)
2218 		return -ENOMEM;
2219 
2220 	crtc_state->cpu_transcoder = (enum transcoder)pipe;
2221 	crtc_state->pixel_multiplier = 1;
2222 	crtc_state->dpll = *dpll;
2223 	crtc_state->output_types = BIT(INTEL_OUTPUT_EDP);
2224 
2225 	if (display->platform.cherryview) {
2226 		chv_compute_dpll(crtc_state);
2227 		chv_enable_pll(crtc_state);
2228 	} else {
2229 		vlv_compute_dpll(crtc_state);
2230 		vlv_enable_pll(crtc_state);
2231 	}
2232 
2233 	intel_crtc_destroy_state(&crtc->base, &crtc_state->uapi);
2234 
2235 	return 0;
2236 }
2237 
2238 void vlv_disable_pll(struct intel_display *display, enum pipe pipe)
2239 {
2240 	u32 val;
2241 
2242 	/* Make sure the pipe isn't still relying on us */
2243 	assert_transcoder_disabled(display, (enum transcoder)pipe);
2244 
2245 	val = DPLL_INTEGRATED_REF_CLK_VLV |
2246 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
2247 	if (pipe != PIPE_A)
2248 		val |= DPLL_INTEGRATED_CRI_CLK_VLV;
2249 
2250 	intel_de_write(display, DPLL(display, pipe), val);
2251 	intel_de_posting_read(display, DPLL(display, pipe));
2252 }
2253 
2254 void chv_disable_pll(struct intel_display *display, enum pipe pipe)
2255 {
2256 	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
2257 	enum dpio_phy phy = vlv_pipe_to_phy(pipe);
2258 	u32 val;
2259 
2260 	/* Make sure the pipe isn't still relying on us */
2261 	assert_transcoder_disabled(display, (enum transcoder)pipe);
2262 
2263 	val = DPLL_SSC_REF_CLK_CHV |
2264 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
2265 	if (pipe != PIPE_A)
2266 		val |= DPLL_INTEGRATED_CRI_CLK_VLV;
2267 
2268 	intel_de_write(display, DPLL(display, pipe), val);
2269 	intel_de_posting_read(display, DPLL(display, pipe));
2270 
2271 	vlv_dpio_get(display->drm);
2272 
2273 	/* Disable 10bit clock to display controller */
2274 	val = vlv_dpio_read(display->drm, phy, CHV_CMN_DW14(ch));
2275 	val &= ~DPIO_DCLKP_EN;
2276 	vlv_dpio_write(display->drm, phy, CHV_CMN_DW14(ch), val);
2277 
2278 	vlv_dpio_put(display->drm);
2279 }
2280 
2281 void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
2282 {
2283 	struct intel_display *display = to_intel_display(crtc_state);
2284 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2285 	enum pipe pipe = crtc->pipe;
2286 
2287 	/* Don't disable pipe or pipe PLLs if needed */
2288 	if (display->platform.i830)
2289 		return;
2290 
2291 	/* Make sure the pipe isn't still relying on us */
2292 	assert_transcoder_disabled(display, crtc_state->cpu_transcoder);
2293 
2294 	intel_de_write(display, DPLL(display, pipe), DPLL_VGA_MODE_DIS);
2295 	intel_de_posting_read(display, DPLL(display, pipe));
2296 }
2297 
2298 
2299 /**
2300  * vlv_force_pll_off - forcibly disable just the PLL
2301  * @display: display device
2302  * @pipe: pipe PLL to disable
2303  *
2304  * Disable the PLL for @pipe. To be used in cases where we need
2305  * the PLL enabled even when @pipe is not going to be enabled.
2306  */
2307 void vlv_force_pll_off(struct intel_display *display, enum pipe pipe)
2308 {
2309 	if (display->platform.cherryview)
2310 		chv_disable_pll(display, pipe);
2311 	else
2312 		vlv_disable_pll(display, pipe);
2313 }
2314 
2315 /* Only for pre-ILK configs */
2316 static void assert_pll(struct intel_display *display,
2317 		       enum pipe pipe, bool state)
2318 {
2319 	bool cur_state;
2320 
2321 	cur_state = intel_de_read(display, DPLL(display, pipe)) & DPLL_VCO_ENABLE;
2322 	INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
2323 				 "PLL state assertion failure (expected %s, current %s)\n",
2324 				 str_on_off(state), str_on_off(cur_state));
2325 }
2326 
/* Assert (via WARN) that the pipe's DPLL VCO is currently enabled. */
void assert_pll_enabled(struct intel_display *display, enum pipe pipe)
{
	assert_pll(display, pipe, true);
}
2331 
/* Assert (via WARN) that the pipe's DPLL VCO is currently disabled. */
void assert_pll_disabled(struct intel_display *display, enum pipe pipe)
{
	assert_pll(display, pipe, false);
}
2336