1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2020 Intel Corporation
4  */
5 
6 #include <linux/kernel.h>
7 #include <linux/string_helpers.h>
8 
9 #include "i915_reg.h"
10 #include "intel_atomic.h"
11 #include "intel_crtc.h"
12 #include "intel_cx0_phy.h"
13 #include "intel_de.h"
14 #include "intel_display.h"
15 #include "intel_display_types.h"
16 #include "intel_dpio_phy.h"
17 #include "intel_dpll.h"
18 #include "intel_lvds.h"
19 #include "intel_panel.h"
20 #include "intel_pps.h"
21 #include "intel_snps_phy.h"
22 #include "vlv_sideband.h"
23 
24 struct intel_dpll_funcs {
25 	int (*crtc_compute_clock)(struct intel_atomic_state *state,
26 				  struct intel_crtc *crtc);
27 	int (*crtc_get_shared_dpll)(struct intel_atomic_state *state,
28 				    struct intel_crtc *crtc);
29 };
30 
31 struct intel_limit {
32 	struct {
33 		int min, max;
34 	} dot, vco, n, m, m1, m2, p, p1;
35 
36 	struct {
37 		int dot_limit;
38 		int p2_slow, p2_fast;
39 	} p2;
40 };
41 static const struct intel_limit intel_limits_i8xx_dac = {
42 	.dot = { .min = 25000, .max = 350000 },
43 	.vco = { .min = 908000, .max = 1512000 },
44 	.n = { .min = 2, .max = 16 },
45 	.m = { .min = 96, .max = 140 },
46 	.m1 = { .min = 18, .max = 26 },
47 	.m2 = { .min = 6, .max = 16 },
48 	.p = { .min = 4, .max = 128 },
49 	.p1 = { .min = 2, .max = 33 },
50 	.p2 = { .dot_limit = 165000,
51 		.p2_slow = 4, .p2_fast = 2 },
52 };
53 
54 static const struct intel_limit intel_limits_i8xx_dvo = {
55 	.dot = { .min = 25000, .max = 350000 },
56 	.vco = { .min = 908000, .max = 1512000 },
57 	.n = { .min = 2, .max = 16 },
58 	.m = { .min = 96, .max = 140 },
59 	.m1 = { .min = 18, .max = 26 },
60 	.m2 = { .min = 6, .max = 16 },
61 	.p = { .min = 4, .max = 128 },
62 	.p1 = { .min = 2, .max = 33 },
63 	.p2 = { .dot_limit = 165000,
64 		.p2_slow = 4, .p2_fast = 4 },
65 };
66 
67 static const struct intel_limit intel_limits_i8xx_lvds = {
68 	.dot = { .min = 25000, .max = 350000 },
69 	.vco = { .min = 908000, .max = 1512000 },
70 	.n = { .min = 2, .max = 16 },
71 	.m = { .min = 96, .max = 140 },
72 	.m1 = { .min = 18, .max = 26 },
73 	.m2 = { .min = 6, .max = 16 },
74 	.p = { .min = 4, .max = 128 },
75 	.p1 = { .min = 1, .max = 6 },
76 	.p2 = { .dot_limit = 165000,
77 		.p2_slow = 14, .p2_fast = 7 },
78 };
79 
80 static const struct intel_limit intel_limits_i9xx_sdvo = {
81 	.dot = { .min = 20000, .max = 400000 },
82 	.vco = { .min = 1400000, .max = 2800000 },
83 	.n = { .min = 1, .max = 6 },
84 	.m = { .min = 70, .max = 120 },
85 	.m1 = { .min = 8, .max = 18 },
86 	.m2 = { .min = 3, .max = 7 },
87 	.p = { .min = 5, .max = 80 },
88 	.p1 = { .min = 1, .max = 8 },
89 	.p2 = { .dot_limit = 200000,
90 		.p2_slow = 10, .p2_fast = 5 },
91 };
92 
93 static const struct intel_limit intel_limits_i9xx_lvds = {
94 	.dot = { .min = 20000, .max = 400000 },
95 	.vco = { .min = 1400000, .max = 2800000 },
96 	.n = { .min = 1, .max = 6 },
97 	.m = { .min = 70, .max = 120 },
98 	.m1 = { .min = 8, .max = 18 },
99 	.m2 = { .min = 3, .max = 7 },
100 	.p = { .min = 7, .max = 98 },
101 	.p1 = { .min = 1, .max = 8 },
102 	.p2 = { .dot_limit = 112000,
103 		.p2_slow = 14, .p2_fast = 7 },
104 };
105 
106 
107 static const struct intel_limit intel_limits_g4x_sdvo = {
108 	.dot = { .min = 25000, .max = 270000 },
109 	.vco = { .min = 1750000, .max = 3500000},
110 	.n = { .min = 1, .max = 4 },
111 	.m = { .min = 104, .max = 138 },
112 	.m1 = { .min = 17, .max = 23 },
113 	.m2 = { .min = 5, .max = 11 },
114 	.p = { .min = 10, .max = 30 },
115 	.p1 = { .min = 1, .max = 3},
116 	.p2 = { .dot_limit = 270000,
117 		.p2_slow = 10,
118 		.p2_fast = 10
119 	},
120 };
121 
122 static const struct intel_limit intel_limits_g4x_hdmi = {
123 	.dot = { .min = 22000, .max = 400000 },
124 	.vco = { .min = 1750000, .max = 3500000},
125 	.n = { .min = 1, .max = 4 },
126 	.m = { .min = 104, .max = 138 },
127 	.m1 = { .min = 16, .max = 23 },
128 	.m2 = { .min = 5, .max = 11 },
129 	.p = { .min = 5, .max = 80 },
130 	.p1 = { .min = 1, .max = 8},
131 	.p2 = { .dot_limit = 165000,
132 		.p2_slow = 10, .p2_fast = 5 },
133 };
134 
135 static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
136 	.dot = { .min = 20000, .max = 115000 },
137 	.vco = { .min = 1750000, .max = 3500000 },
138 	.n = { .min = 1, .max = 3 },
139 	.m = { .min = 104, .max = 138 },
140 	.m1 = { .min = 17, .max = 23 },
141 	.m2 = { .min = 5, .max = 11 },
142 	.p = { .min = 28, .max = 112 },
143 	.p1 = { .min = 2, .max = 8 },
144 	.p2 = { .dot_limit = 0,
145 		.p2_slow = 14, .p2_fast = 14
146 	},
147 };
148 
149 static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
150 	.dot = { .min = 80000, .max = 224000 },
151 	.vco = { .min = 1750000, .max = 3500000 },
152 	.n = { .min = 1, .max = 3 },
153 	.m = { .min = 104, .max = 138 },
154 	.m1 = { .min = 17, .max = 23 },
155 	.m2 = { .min = 5, .max = 11 },
156 	.p = { .min = 14, .max = 42 },
157 	.p1 = { .min = 2, .max = 6 },
158 	.p2 = { .dot_limit = 0,
159 		.p2_slow = 7, .p2_fast = 7
160 	},
161 };
162 
163 static const struct intel_limit pnv_limits_sdvo = {
164 	.dot = { .min = 20000, .max = 400000},
165 	.vco = { .min = 1700000, .max = 3500000 },
166 	/* Pineview's N counter is a ring counter */
167 	.n = { .min = 3, .max = 6 },
168 	.m = { .min = 2, .max = 256 },
169 	/* Pineview only has one combined m divider, which we treat as m2. */
170 	.m1 = { .min = 0, .max = 0 },
171 	.m2 = { .min = 0, .max = 254 },
172 	.p = { .min = 5, .max = 80 },
173 	.p1 = { .min = 1, .max = 8 },
174 	.p2 = { .dot_limit = 200000,
175 		.p2_slow = 10, .p2_fast = 5 },
176 };
177 
178 static const struct intel_limit pnv_limits_lvds = {
179 	.dot = { .min = 20000, .max = 400000 },
180 	.vco = { .min = 1700000, .max = 3500000 },
181 	.n = { .min = 3, .max = 6 },
182 	.m = { .min = 2, .max = 256 },
183 	.m1 = { .min = 0, .max = 0 },
184 	.m2 = { .min = 0, .max = 254 },
185 	.p = { .min = 7, .max = 112 },
186 	.p1 = { .min = 1, .max = 8 },
187 	.p2 = { .dot_limit = 112000,
188 		.p2_slow = 14, .p2_fast = 14 },
189 };
190 
191 /* Ironlake / Sandybridge
192  *
193  * We calculate clock using (register_value + 2) for N/M1/M2, so here
194  * the range value for them is (actual_value - 2).
195  */
196 static const struct intel_limit ilk_limits_dac = {
197 	.dot = { .min = 25000, .max = 350000 },
198 	.vco = { .min = 1760000, .max = 3510000 },
199 	.n = { .min = 1, .max = 5 },
200 	.m = { .min = 79, .max = 127 },
201 	.m1 = { .min = 12, .max = 22 },
202 	.m2 = { .min = 5, .max = 9 },
203 	.p = { .min = 5, .max = 80 },
204 	.p1 = { .min = 1, .max = 8 },
205 	.p2 = { .dot_limit = 225000,
206 		.p2_slow = 10, .p2_fast = 5 },
207 };
208 
209 static const struct intel_limit ilk_limits_single_lvds = {
210 	.dot = { .min = 25000, .max = 350000 },
211 	.vco = { .min = 1760000, .max = 3510000 },
212 	.n = { .min = 1, .max = 3 },
213 	.m = { .min = 79, .max = 118 },
214 	.m1 = { .min = 12, .max = 22 },
215 	.m2 = { .min = 5, .max = 9 },
216 	.p = { .min = 28, .max = 112 },
217 	.p1 = { .min = 2, .max = 8 },
218 	.p2 = { .dot_limit = 225000,
219 		.p2_slow = 14, .p2_fast = 14 },
220 };
221 
222 static const struct intel_limit ilk_limits_dual_lvds = {
223 	.dot = { .min = 25000, .max = 350000 },
224 	.vco = { .min = 1760000, .max = 3510000 },
225 	.n = { .min = 1, .max = 3 },
226 	.m = { .min = 79, .max = 127 },
227 	.m1 = { .min = 12, .max = 22 },
228 	.m2 = { .min = 5, .max = 9 },
229 	.p = { .min = 14, .max = 56 },
230 	.p1 = { .min = 2, .max = 8 },
231 	.p2 = { .dot_limit = 225000,
232 		.p2_slow = 7, .p2_fast = 7 },
233 };
234 
235 /* LVDS 100 MHz refclk limits. */
236 static const struct intel_limit ilk_limits_single_lvds_100m = {
237 	.dot = { .min = 25000, .max = 350000 },
238 	.vco = { .min = 1760000, .max = 3510000 },
239 	.n = { .min = 1, .max = 2 },
240 	.m = { .min = 79, .max = 126 },
241 	.m1 = { .min = 12, .max = 22 },
242 	.m2 = { .min = 5, .max = 9 },
243 	.p = { .min = 28, .max = 112 },
244 	.p1 = { .min = 2, .max = 8 },
245 	.p2 = { .dot_limit = 225000,
246 		.p2_slow = 14, .p2_fast = 14 },
247 };
248 
249 static const struct intel_limit ilk_limits_dual_lvds_100m = {
250 	.dot = { .min = 25000, .max = 350000 },
251 	.vco = { .min = 1760000, .max = 3510000 },
252 	.n = { .min = 1, .max = 3 },
253 	.m = { .min = 79, .max = 126 },
254 	.m1 = { .min = 12, .max = 22 },
255 	.m2 = { .min = 5, .max = 9 },
256 	.p = { .min = 14, .max = 42 },
257 	.p1 = { .min = 2, .max = 6 },
258 	.p2 = { .dot_limit = 225000,
259 		.p2_slow = 7, .p2_fast = 7 },
260 };
261 
262 static const struct intel_limit intel_limits_vlv = {
263 	 /*
264 	  * These are based on the data rate limits (measured in fast clocks)
265 	  * since those are the strictest limits we have. The fast
266 	  * clock and actual rate limits are more relaxed, so checking
267 	  * them would make no difference.
268 	  */
269 	.dot = { .min = 25000, .max = 270000 },
270 	.vco = { .min = 4000000, .max = 6000000 },
271 	.n = { .min = 1, .max = 7 },
272 	.m1 = { .min = 2, .max = 3 },
273 	.m2 = { .min = 11, .max = 156 },
274 	.p1 = { .min = 2, .max = 3 },
275 	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
276 };
277 
278 static const struct intel_limit intel_limits_chv = {
279 	/*
280 	 * These are based on the data rate limits (measured in fast clocks)
281 	 * since those are the strictest limits we have.  The fast
282 	 * clock and actual rate limits are more relaxed, so checking
283 	 * them would make no difference.
284 	 */
285 	.dot = { .min = 25000, .max = 540000 },
286 	.vco = { .min = 4800000, .max = 6480000 },
287 	.n = { .min = 1, .max = 1 },
288 	.m1 = { .min = 2, .max = 2 },
289 	.m2 = { .min = 24 << 22, .max = 175 << 22 },
290 	.p1 = { .min = 2, .max = 4 },
291 	.p2 = {	.p2_slow = 1, .p2_fast = 14 },
292 };
293 
294 static const struct intel_limit intel_limits_bxt = {
295 	.dot = { .min = 25000, .max = 594000 },
296 	.vco = { .min = 4800000, .max = 6700000 },
297 	.n = { .min = 1, .max = 1 },
298 	.m1 = { .min = 2, .max = 2 },
299 	/* FIXME: find real m2 limits */
300 	.m2 = { .min = 2 << 22, .max = 255 << 22 },
301 	.p1 = { .min = 2, .max = 4 },
302 	.p2 = { .p2_slow = 1, .p2_fast = 20 },
303 };
304 
305 /*
306  * Platform specific helpers to calculate the port PLL loopback- (clock.m),
307  * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
308  * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
309  * The helpers' return value is the rate of the clock that is fed to the
310  * display engine's pipe which can be the above fast dot clock rate or a
311  * divided-down version of it.
312  */
313 /* m1 is reserved as 0 in Pineview, n is a ring counter */
314 int pnv_calc_dpll_params(int refclk, struct dpll *clock)
315 {
316 	clock->m = clock->m2 + 2;
317 	clock->p = clock->p1 * clock->p2;
318 
319 	clock->vco = clock->n == 0 ? 0 :
320 		DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
321 	clock->dot = clock->p == 0 ? 0 :
322 		DIV_ROUND_CLOSEST(clock->vco, clock->p);
323 
324 	return clock->dot;
325 }
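
/*
 * Editor's note: illustrative sketch only, not part of the driver. A worked
 * example of the Pineview formula above, assuming a 96 MHz refclk; the
 * divider values are hypothetical but lie within pnv_limits_sdvo.
 */
static void __maybe_unused pnv_dpll_params_example(void)
{
	struct dpll clock = {
		.n = 3, .m2 = 103,	/* m = m2 + 2 = 105 */
		.p1 = 3, .p2 = 10,	/* p = p1 * p2 = 30 */
	};

	pnv_calc_dpll_params(96000, &clock);
	/*
	 * clock.vco = 96000 * 105 / 3 = 3360000 kHz
	 * clock.dot = 3360000 / 30    =  112000 kHz
	 */
}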
326 
327 static u32 i9xx_dpll_compute_m(const struct dpll *dpll)
328 {
329 	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
330 }
331 
332 int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
333 {
334 	clock->m = i9xx_dpll_compute_m(clock);
335 	clock->p = clock->p1 * clock->p2;
336 
337 	clock->vco = clock->n + 2 == 0 ? 0 :
338 		DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
339 	clock->dot = clock->p == 0 ? 0 :
340 		DIV_ROUND_CLOSEST(clock->vco, clock->p);
341 
342 	return clock->dot;
343 }
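
/*
 * Editor's note: illustrative sketch only, not part of the driver. A worked
 * example of the i9xx formula, assuming a 96 MHz refclk; the divider values
 * are hypothetical but lie within intel_limits_i9xx_sdvo.
 */
static void __maybe_unused i9xx_dpll_params_example(void)
{
	struct dpll clock = {
		.n = 2, .m1 = 14, .m2 = 6,	/* m = 5 * (14 + 2) + (6 + 2) = 88 */
		.p1 = 2, .p2 = 10,		/* p = p1 * p2 = 20 */
	};

	i9xx_calc_dpll_params(96000, &clock);
	/*
	 * clock.vco = 96000 * 88 / (2 + 2) = 2112000 kHz
	 * clock.dot = 2112000 / 20         =  105600 kHz
	 */
}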
344 
345 int vlv_calc_dpll_params(int refclk, struct dpll *clock)
346 {
347 	clock->m = clock->m1 * clock->m2;
348 	clock->p = clock->p1 * clock->p2 * 5;
349 
350 	clock->vco = clock->n == 0 ? 0 :
351 		DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
352 	clock->dot = clock->p == 0 ? 0 :
353 		DIV_ROUND_CLOSEST(clock->vco, clock->p);
354 
355 	return clock->dot;
356 }
357 
358 int chv_calc_dpll_params(int refclk, struct dpll *clock)
359 {
360 	clock->m = clock->m1 * clock->m2;
361 	clock->p = clock->p1 * clock->p2 * 5;
362 
363 	clock->vco = clock->n == 0 ? 0 :
364 		DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m), clock->n << 22);
365 	clock->dot = clock->p == 0 ? 0 :
366 		DIV_ROUND_CLOSEST(clock->vco, clock->p);
367 
368 	return clock->dot;
369 }
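
/*
 * Editor's note: illustrative sketch only, not part of the driver. On CHV
 * (and BXT, which reuses this path) m2 is stored in 22.22 fixed point, hence
 * the n << 22 divisor above. A hypothetical example with the 100 MHz refclk
 * used by chv_crtc_compute_clock():
 */
static void __maybe_unused chv_dpll_params_example(void)
{
	struct dpll clock = {
		.n = 1, .m1 = 2,
		.m2 = 27 << 22,		/* 27.0 in 22.22 fixed point */
		.p1 = 2, .p2 = 2,	/* p = p1 * p2 * 5 = 20 */
	};

	chv_calc_dpll_params(100000, &clock);
	/*
	 * clock.vco = 100000 * 2 * (27 << 22) / (1 << 22) = 5400000 kHz
	 * clock.dot = 5400000 / 20                        =  270000 kHz
	 */
}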
370 
371 /*
372  * Returns whether the given set of divisors are valid for a given refclk with
373  * the given connectors.
374  */
375 static bool intel_pll_is_valid(struct drm_i915_private *dev_priv,
376 			       const struct intel_limit *limit,
377 			       const struct dpll *clock)
378 {
379 	if (clock->n < limit->n.min || limit->n.max < clock->n)
380 		return false;
381 	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
382 		return false;
383 	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
384 		return false;
385 	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
386 		return false;
387 
388 	if (!IS_PINEVIEW(dev_priv) && !IS_LP(dev_priv))
389 		if (clock->m1 <= clock->m2)
390 			return false;
391 
392 	if (!IS_LP(dev_priv)) {
393 		if (clock->p < limit->p.min || limit->p.max < clock->p)
394 			return false;
395 		if (clock->m < limit->m.min || limit->m.max < clock->m)
396 			return false;
397 	}
398 
399 	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
400 		return false;
401 	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
402 	 * connector, etc., rather than just a single range.
403 	 */
404 	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
405 		return false;
406 
407 	return true;
408 }
409 
410 static int
411 i9xx_select_p2_div(const struct intel_limit *limit,
412 		   const struct intel_crtc_state *crtc_state,
413 		   int target)
414 {
415 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
416 
417 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
418 		/*
419 		 * For LVDS just rely on its current settings for dual-channel.
420 		 * We haven't figured out how to reliably set up different
421 		 * single/dual channel state, if we even can.
422 		 */
423 		if (intel_is_dual_link_lvds(dev_priv))
424 			return limit->p2.p2_fast;
425 		else
426 			return limit->p2.p2_slow;
427 	} else {
428 		if (target < limit->p2.dot_limit)
429 			return limit->p2.p2_slow;
430 		else
431 			return limit->p2.p2_fast;
432 	}
433 }
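
/*
 * Editor's note: illustrative sketch only, not part of the driver. For
 * non-LVDS outputs the p2 divider above depends only on whether the target
 * dot clock is below the limit table's dot_limit. A minimal restatement of
 * that rule, with hypothetical targets:
 */
static int __maybe_unused p2_for_target_example(const struct intel_limit *limit,
						int target)
{
	/*
	 * e.g. with intel_limits_i9xx_sdvo (dot_limit = 200000):
	 *   target = 148500 kHz -> p2_slow = 10
	 *   target = 270000 kHz -> p2_fast = 5
	 */
	return target < limit->p2.dot_limit ? limit->p2.p2_slow :
					      limit->p2.p2_fast;
}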
434 
435 /*
436  * Returns a set of divisors for the desired target clock with the given
437  * refclk, or FALSE.
438  *
439  * Target and reference clocks are specified in kHz.
440  *
441  * If match_clock is provided, then best_clock P divider must match the P
442  * divider from @match_clock used for LVDS downclocking.
443  */
444 static bool
445 i9xx_find_best_dpll(const struct intel_limit *limit,
446 		    struct intel_crtc_state *crtc_state,
447 		    int target, int refclk,
448 		    const struct dpll *match_clock,
449 		    struct dpll *best_clock)
450 {
451 	struct drm_device *dev = crtc_state->uapi.crtc->dev;
452 	struct dpll clock;
453 	int err = target;
454 
455 	memset(best_clock, 0, sizeof(*best_clock));
456 
457 	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
458 
459 	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
460 	     clock.m1++) {
461 		for (clock.m2 = limit->m2.min;
462 		     clock.m2 <= limit->m2.max; clock.m2++) {
463 			if (clock.m2 >= clock.m1)
464 				break;
465 			for (clock.n = limit->n.min;
466 			     clock.n <= limit->n.max; clock.n++) {
467 				for (clock.p1 = limit->p1.min;
468 					clock.p1 <= limit->p1.max; clock.p1++) {
469 					int this_err;
470 
471 					i9xx_calc_dpll_params(refclk, &clock);
472 					if (!intel_pll_is_valid(to_i915(dev),
473 								limit,
474 								&clock))
475 						continue;
476 					if (match_clock &&
477 					    clock.p != match_clock->p)
478 						continue;
479 
480 					this_err = abs(clock.dot - target);
481 					if (this_err < err) {
482 						*best_clock = clock;
483 						err = this_err;
484 					}
485 				}
486 			}
487 		}
488 	}
489 
490 	return (err != target);
491 }
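
/*
 * Editor's note: illustrative sketch only, not part of the driver. Typical
 * usage, mirroring what i9xx_crtc_compute_clock() does later in this file:
 * brute-force the divider search against a limit table, then fill in the
 * derived m/p/vco/dot values. crtc_state and refclk are assumed to come
 * from the caller.
 */
static int __maybe_unused i9xx_pick_dividers_example(struct intel_crtc_state *crtc_state,
						     int refclk)
{
	const struct intel_limit *limit = &intel_limits_i9xx_sdvo;

	if (!i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				 refclk, NULL, &crtc_state->dpll))
		return -EINVAL;

	i9xx_calc_dpll_params(refclk, &crtc_state->dpll);

	return 0;
}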
492 
493 /*
494  * Returns a set of divisors for the desired target clock with the given
495  * refclk, or FALSE.
496  *
497  * Target and reference clocks are specified in kHz.
498  *
499  * If match_clock is provided, then best_clock P divider must match the P
500  * divider from @match_clock used for LVDS downclocking.
501  */
502 static bool
503 pnv_find_best_dpll(const struct intel_limit *limit,
504 		   struct intel_crtc_state *crtc_state,
505 		   int target, int refclk,
506 		   const struct dpll *match_clock,
507 		   struct dpll *best_clock)
508 {
509 	struct drm_device *dev = crtc_state->uapi.crtc->dev;
510 	struct dpll clock;
511 	int err = target;
512 
513 	memset(best_clock, 0, sizeof(*best_clock));
514 
515 	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
516 
517 	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
518 	     clock.m1++) {
519 		for (clock.m2 = limit->m2.min;
520 		     clock.m2 <= limit->m2.max; clock.m2++) {
521 			for (clock.n = limit->n.min;
522 			     clock.n <= limit->n.max; clock.n++) {
523 				for (clock.p1 = limit->p1.min;
524 					clock.p1 <= limit->p1.max; clock.p1++) {
525 					int this_err;
526 
527 					pnv_calc_dpll_params(refclk, &clock);
528 					if (!intel_pll_is_valid(to_i915(dev),
529 								limit,
530 								&clock))
531 						continue;
532 					if (match_clock &&
533 					    clock.p != match_clock->p)
534 						continue;
535 
536 					this_err = abs(clock.dot - target);
537 					if (this_err < err) {
538 						*best_clock = clock;
539 						err = this_err;
540 					}
541 				}
542 			}
543 		}
544 	}
545 
546 	return (err != target);
547 }
548 
549 /*
550  * Returns a set of divisors for the desired target clock with the given
551  * refclk, or FALSE.
552  *
553  * Target and reference clocks are specified in kHz.
554  *
555  * If match_clock is provided, then best_clock P divider must match the P
556  * divider from @match_clock used for LVDS downclocking.
557  */
558 static bool
559 g4x_find_best_dpll(const struct intel_limit *limit,
560 		   struct intel_crtc_state *crtc_state,
561 		   int target, int refclk,
562 		   const struct dpll *match_clock,
563 		   struct dpll *best_clock)
564 {
565 	struct drm_device *dev = crtc_state->uapi.crtc->dev;
566 	struct dpll clock;
567 	int max_n;
568 	bool found = false;
569 	/* approximately equals target * 0.00585 */
570 	int err_most = (target >> 8) + (target >> 9);
571 
572 	memset(best_clock, 0, sizeof(*best_clock));
573 
574 	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
575 
576 	max_n = limit->n.max;
577 	/* based on hardware requirement, prefer smaller n for better precision */
578 	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
579 		/* based on hardware requirement, prefer larger m1, m2 */
580 		for (clock.m1 = limit->m1.max;
581 		     clock.m1 >= limit->m1.min; clock.m1--) {
582 			for (clock.m2 = limit->m2.max;
583 			     clock.m2 >= limit->m2.min; clock.m2--) {
584 				for (clock.p1 = limit->p1.max;
585 				     clock.p1 >= limit->p1.min; clock.p1--) {
586 					int this_err;
587 
588 					i9xx_calc_dpll_params(refclk, &clock);
589 					if (!intel_pll_is_valid(to_i915(dev),
590 								limit,
591 								&clock))
592 						continue;
593 
594 					this_err = abs(clock.dot - target);
595 					if (this_err < err_most) {
596 						*best_clock = clock;
597 						err_most = this_err;
598 						max_n = clock.n;
599 						found = true;
600 					}
601 				}
602 			}
603 		}
604 	}
605 	return found;
606 }
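
/*
 * Editor's note: illustrative sketch only, not part of the driver. The
 * initial tolerance used above, (target >> 8) + (target >> 9), is roughly
 * target * (1/256 + 1/512), i.e. about 0.59% of the target clock. For a
 * hypothetical 270000 kHz target:
 */
static int __maybe_unused g4x_err_most_example(void)
{
	int target = 270000;	/* kHz */

	/* 1054 + 527 = 1581 kHz, ~0.59% of 270000 kHz */
	return (target >> 8) + (target >> 9);
}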
607 
608 /*
609  * Check whether the calculated PLL configuration is better than the best one
610  * found so far. The calculated error is returned via @error_ppm.
611  */
612 static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
613 			       const struct dpll *calculated_clock,
614 			       const struct dpll *best_clock,
615 			       unsigned int best_error_ppm,
616 			       unsigned int *error_ppm)
617 {
618 	/*
619 	 * For CHV ignore the error and consider only the P value.
620 	 * Prefer a bigger P value based on HW requirements.
621 	 */
622 	if (IS_CHERRYVIEW(to_i915(dev))) {
623 		*error_ppm = 0;
624 
625 		return calculated_clock->p > best_clock->p;
626 	}
627 
628 	if (drm_WARN_ON_ONCE(dev, !target_freq))
629 		return false;
630 
631 	*error_ppm = div_u64(1000000ULL *
632 				abs(target_freq - calculated_clock->dot),
633 			     target_freq);
634 	/*
635 	 * Prefer a better P value over a better (smaller) error if the error
636 	 * is small. Ensure this preference for future configurations too by
637 	 * setting the error to 0.
638 	 */
639 	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
640 		*error_ppm = 0;
641 
642 		return true;
643 	}
644 
645 	return *error_ppm + 10 < best_error_ppm;
646 }
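
/*
 * Editor's note: illustrative sketch only, not part of the driver. The error
 * metric used above is the deviation from the target expressed in parts per
 * million. A hypothetical example:
 */
static unsigned int __maybe_unused dpll_error_ppm_example(void)
{
	int target = 270000;	/* kHz */
	int dot = 269973;	/* kHz, achieved dot clock */

	/* 1000000 * |270000 - 269973| / 270000 = 100 ppm */
	return div_u64(1000000ULL * abs(target - dot), target);
}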
647 
648 /*
649  * Returns a set of divisors for the desired target clock with the given
650  * refclk, or FALSE.
651  */
652 static bool
653 vlv_find_best_dpll(const struct intel_limit *limit,
654 		   struct intel_crtc_state *crtc_state,
655 		   int target, int refclk,
656 		   const struct dpll *match_clock,
657 		   struct dpll *best_clock)
658 {
659 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
660 	struct drm_device *dev = crtc->base.dev;
661 	struct dpll clock;
662 	unsigned int bestppm = 1000000;
663 	/* min update 19.2 MHz */
664 	int max_n = min(limit->n.max, refclk / 19200);
665 	bool found = false;
666 
667 	memset(best_clock, 0, sizeof(*best_clock));
668 
669 	/* based on hardware requirement, prefer smaller n for better precision */
670 	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
671 		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
672 			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
673 			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
674 				clock.p = clock.p1 * clock.p2 * 5;
675 				/* based on hardware requirement, prefer bigger m1,m2 values */
676 				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
677 					unsigned int ppm;
678 
679 					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
680 								     refclk * clock.m1);
681 
682 					vlv_calc_dpll_params(refclk, &clock);
683 
684 					if (!intel_pll_is_valid(to_i915(dev),
685 								limit,
686 								&clock))
687 						continue;
688 
689 					if (!vlv_PLL_is_optimal(dev, target,
690 								&clock,
691 								best_clock,
692 								bestppm, &ppm))
693 						continue;
694 
695 					*best_clock = clock;
696 					bestppm = ppm;
697 					found = true;
698 				}
699 			}
700 		}
701 	}
702 
703 	return found;
704 }
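
/*
 * Editor's note: illustrative sketch only, not part of the driver. Unlike the
 * older platforms, the loop above does not iterate over m2; it solves for it
 * directly from dot = refclk * m1 * m2 / (n * p). A hypothetical example with
 * the 100 MHz refclk used by vlv_crtc_compute_clock():
 */
static void __maybe_unused vlv_solve_m2_example(void)
{
	int refclk = 100000, target = 108000;	/* kHz */
	int n = 2, m1 = 3, p = 2 * 5 * 5;	/* p1 = 2, p2 = 5 */
	int m2;

	m2 = DIV_ROUND_CLOSEST(target * p * n, refclk * m1);
	/*
	 * m2  = 108000 * 50 * 2 / (100000 * 3) = 36
	 * vco = 100000 * 3 * 36 / 2            = 5400000 kHz
	 * dot = 5400000 / 50                   =  108000 kHz (exact match)
	 */
	(void)m2;
}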
705 
706 /*
707  * Returns a set of divisors for the desired target clock with the given
708  * refclk, or FALSE.
709  */
710 static bool
711 chv_find_best_dpll(const struct intel_limit *limit,
712 		   struct intel_crtc_state *crtc_state,
713 		   int target, int refclk,
714 		   const struct dpll *match_clock,
715 		   struct dpll *best_clock)
716 {
717 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
718 	struct drm_device *dev = crtc->base.dev;
719 	unsigned int best_error_ppm;
720 	struct dpll clock;
721 	u64 m2;
722 	bool found = false;
723 
724 	memset(best_clock, 0, sizeof(*best_clock));
725 	best_error_ppm = 1000000;
726 
727 	/*
728 	 * Based on the hardware doc, n is always set to 1 and m1 is always
729 	 * set to 2. If we ever need to support a 200 MHz refclk, we will
730 	 * need to revisit this because n may no longer be 1.
731 	 */
732 	clock.n = 1;
733 	clock.m1 = 2;
734 
735 	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
736 		for (clock.p2 = limit->p2.p2_fast;
737 				clock.p2 >= limit->p2.p2_slow;
738 				clock.p2 -= clock.p2 > 10 ? 2 : 1) {
739 			unsigned int error_ppm;
740 
741 			clock.p = clock.p1 * clock.p2 * 5;
742 
743 			m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
744 						   refclk * clock.m1);
745 
746 			if (m2 > INT_MAX/clock.m1)
747 				continue;
748 
749 			clock.m2 = m2;
750 
751 			chv_calc_dpll_params(refclk, &clock);
752 
753 			if (!intel_pll_is_valid(to_i915(dev), limit, &clock))
754 				continue;
755 
756 			if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
757 						best_error_ppm, &error_ppm))
758 				continue;
759 
760 			*best_clock = clock;
761 			best_error_ppm = error_ppm;
762 			found = true;
763 		}
764 	}
765 
766 	return found;
767 }
768 
769 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
770 			struct dpll *best_clock)
771 {
772 	const struct intel_limit *limit = &intel_limits_bxt;
773 	int refclk = 100000;
774 
775 	return chv_find_best_dpll(limit, crtc_state,
776 				  crtc_state->port_clock, refclk,
777 				  NULL, best_clock);
778 }
779 
780 u32 i9xx_dpll_compute_fp(const struct dpll *dpll)
781 {
782 	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
783 }
784 
785 static u32 pnv_dpll_compute_fp(const struct dpll *dpll)
786 {
787 	return (1 << dpll->n) << 16 | dpll->m2;
788 }
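
/*
 * Editor's note: illustrative sketch only, not part of the driver. The FP
 * register simply packs the feedback dividers; Pineview encodes n one-hot
 * since it is a ring counter. Hypothetical divider values:
 */
static void __maybe_unused dpll_compute_fp_example(void)
{
	const struct dpll i9xx = { .n = 2, .m1 = 14, .m2 = 6 };
	const struct dpll pnv = { .n = 3, .m2 = 103 };

	/* 2 << 16 | 14 << 8 | 6 = 0x00020e06 */
	WARN_ON(i9xx_dpll_compute_fp(&i9xx) != 0x00020e06);
	/* (1 << 3) << 16 | 103 = 0x00080067 */
	WARN_ON(pnv_dpll_compute_fp(&pnv) != 0x00080067);
}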
789 
790 static void i9xx_update_pll_dividers(struct intel_crtc_state *crtc_state,
791 				     const struct dpll *clock,
792 				     const struct dpll *reduced_clock)
793 {
794 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
795 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
796 	u32 fp, fp2;
797 
798 	if (IS_PINEVIEW(dev_priv)) {
799 		fp = pnv_dpll_compute_fp(clock);
800 		fp2 = pnv_dpll_compute_fp(reduced_clock);
801 	} else {
802 		fp = i9xx_dpll_compute_fp(clock);
803 		fp2 = i9xx_dpll_compute_fp(reduced_clock);
804 	}
805 
806 	crtc_state->dpll_hw_state.fp0 = fp;
807 	crtc_state->dpll_hw_state.fp1 = fp2;
808 }
809 
810 static void i9xx_compute_dpll(struct intel_crtc_state *crtc_state,
811 			      const struct dpll *clock,
812 			      const struct dpll *reduced_clock)
813 {
814 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
815 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
816 	u32 dpll;
817 
818 	i9xx_update_pll_dividers(crtc_state, clock, reduced_clock);
819 
820 	dpll = DPLL_VGA_MODE_DIS;
821 
822 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
823 		dpll |= DPLLB_MODE_LVDS;
824 	else
825 		dpll |= DPLLB_MODE_DAC_SERIAL;
826 
827 	if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
828 	    IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
829 		dpll |= (crtc_state->pixel_multiplier - 1)
830 			<< SDVO_MULTIPLIER_SHIFT_HIRES;
831 	}
832 
833 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
834 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
835 		dpll |= DPLL_SDVO_HIGH_SPEED;
836 
837 	if (intel_crtc_has_dp_encoder(crtc_state))
838 		dpll |= DPLL_SDVO_HIGH_SPEED;
839 
840 	/* compute bitmask from p1 value */
841 	if (IS_G4X(dev_priv)) {
842 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
843 		dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
844 	} else if (IS_PINEVIEW(dev_priv)) {
845 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
846 		WARN_ON(reduced_clock->p1 != clock->p1);
847 	} else {
848 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
849 		WARN_ON(reduced_clock->p1 != clock->p1);
850 	}
851 
852 	switch (clock->p2) {
853 	case 5:
854 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
855 		break;
856 	case 7:
857 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
858 		break;
859 	case 10:
860 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
861 		break;
862 	case 14:
863 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
864 		break;
865 	}
866 	WARN_ON(reduced_clock->p2 != clock->p2);
867 
868 	if (DISPLAY_VER(dev_priv) >= 4)
869 		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
870 
871 	if (crtc_state->sdvo_tv_clock)
872 		dpll |= PLL_REF_INPUT_TVCLKINBC;
873 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
874 		 intel_panel_use_ssc(dev_priv))
875 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
876 	else
877 		dpll |= PLL_REF_INPUT_DREFCLK;
878 
879 	dpll |= DPLL_VCO_ENABLE;
880 	crtc_state->dpll_hw_state.dpll = dpll;
881 
882 	if (DISPLAY_VER(dev_priv) >= 4) {
883 		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
884 			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
885 		crtc_state->dpll_hw_state.dpll_md = dpll_md;
886 	}
887 }
888 
889 static void i8xx_compute_dpll(struct intel_crtc_state *crtc_state,
890 			      const struct dpll *clock,
891 			      const struct dpll *reduced_clock)
892 {
893 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
894 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
895 	u32 dpll;
896 
897 	i9xx_update_pll_dividers(crtc_state, clock, reduced_clock);
898 
899 	dpll = DPLL_VGA_MODE_DIS;
900 
901 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
902 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
903 	} else {
904 		if (clock->p1 == 2)
905 			dpll |= PLL_P1_DIVIDE_BY_TWO;
906 		else
907 			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
908 		if (clock->p2 == 4)
909 			dpll |= PLL_P2_DIVIDE_BY_4;
910 	}
911 	WARN_ON(reduced_clock->p1 != clock->p1);
912 	WARN_ON(reduced_clock->p2 != clock->p2);
913 
914 	/*
915 	 * Bspec:
916 	 * "[Almador Errata]: For the correct operation of the muxed DVO pins
917 	 *  (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
918 	 *  GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
919 	 *  Enable) must be set to “1” in both the DPLL A Control Register
920 	 *  (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
921 	 *
922 	 * For simplicity we keep both bits always enabled in
923 	 * both DPLLs. The spec says we should disable the DVO 2X clock
924 	 * when not needed, but this seems to work fine in practice.
925 	 */
926 	if (IS_I830(dev_priv) ||
927 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
928 		dpll |= DPLL_DVO_2X_MODE;
929 
930 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
931 	    intel_panel_use_ssc(dev_priv))
932 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
933 	else
934 		dpll |= PLL_REF_INPUT_DREFCLK;
935 
936 	dpll |= DPLL_VCO_ENABLE;
937 	crtc_state->dpll_hw_state.dpll = dpll;
938 }
939 
940 static int hsw_crtc_compute_clock(struct intel_atomic_state *state,
941 				  struct intel_crtc *crtc)
942 {
943 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
944 	struct intel_crtc_state *crtc_state =
945 		intel_atomic_get_new_crtc_state(state, crtc);
946 	struct intel_encoder *encoder =
947 		intel_get_crtc_new_encoder(state, crtc_state);
948 	int ret;
949 
950 	if (DISPLAY_VER(dev_priv) < 11 &&
951 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
952 		return 0;
953 
954 	ret = intel_compute_shared_dplls(state, crtc, encoder);
955 	if (ret)
956 		return ret;
957 
958 	/* FIXME this is a mess */
959 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
960 		return 0;
961 
962 	/* CRT dotclock is determined via other means */
963 	if (!crtc_state->has_pch_encoder)
964 		crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
965 
966 	return 0;
967 }
968 
969 static int hsw_crtc_get_shared_dpll(struct intel_atomic_state *state,
970 				    struct intel_crtc *crtc)
971 {
972 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
973 	struct intel_crtc_state *crtc_state =
974 		intel_atomic_get_new_crtc_state(state, crtc);
975 	struct intel_encoder *encoder =
976 		intel_get_crtc_new_encoder(state, crtc_state);
977 
978 	if (DISPLAY_VER(dev_priv) < 11 &&
979 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
980 		return 0;
981 
982 	return intel_reserve_shared_dplls(state, crtc, encoder);
983 }
984 
985 static int dg2_crtc_compute_clock(struct intel_atomic_state *state,
986 				  struct intel_crtc *crtc)
987 {
988 	struct intel_crtc_state *crtc_state =
989 		intel_atomic_get_new_crtc_state(state, crtc);
990 	struct intel_encoder *encoder =
991 		intel_get_crtc_new_encoder(state, crtc_state);
992 	int ret;
993 
994 	ret = intel_mpllb_calc_state(crtc_state, encoder);
995 	if (ret)
996 		return ret;
997 
998 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
999 
1000 	return 0;
1001 }
1002 
1003 static int mtl_crtc_compute_clock(struct intel_atomic_state *state,
1004 				  struct intel_crtc *crtc)
1005 {
1006 	struct drm_i915_private *i915 = to_i915(state->base.dev);
1007 	struct intel_crtc_state *crtc_state =
1008 		intel_atomic_get_new_crtc_state(state, crtc);
1009 	struct intel_encoder *encoder =
1010 		intel_get_crtc_new_encoder(state, crtc_state);
1011 	enum phy phy = intel_port_to_phy(i915, encoder->port);
1012 	int ret;
1013 
1014 	ret = intel_cx0pll_calc_state(crtc_state, encoder);
1015 	if (ret)
1016 		return ret;
1017 
1018 	/* TODO: Do the readback via intel_compute_shared_dplls() */
1019 	if (intel_is_c10phy(i915, phy))
1020 		crtc_state->port_clock = intel_c10pll_calc_port_clock(encoder, &crtc_state->cx0pll_state.c10);
1021 	else
1022 		crtc_state->port_clock = intel_c20pll_calc_port_clock(encoder, &crtc_state->cx0pll_state.c20);
1023 
1024 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1025 
1026 	return 0;
1027 }
1028 
1029 static bool ilk_needs_fb_cb_tune(const struct dpll *dpll, int factor)
1030 {
1031 	return dpll->m < factor * dpll->n;
1032 }
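
/*
 * Editor's note: illustrative sketch only, not part of the driver. FP_CB_TUNE
 * is set below whenever the feedback divider m is small relative to n, i.e.
 * m < factor * n. Hypothetical examples with the default factor of 21:
 */
static void __maybe_unused ilk_fb_cb_tune_example(void)
{
	const struct dpll a = { .m = 104, .n = 5 };	/* 104 < 21 * 5 -> tune */
	const struct dpll b = { .m = 104, .n = 4 };	/* 104 > 21 * 4 -> no tune */

	WARN_ON(!ilk_needs_fb_cb_tune(&a, 21));
	WARN_ON(ilk_needs_fb_cb_tune(&b, 21));
}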
1033 
1034 static void ilk_update_pll_dividers(struct intel_crtc_state *crtc_state,
1035 				    const struct dpll *clock,
1036 				    const struct dpll *reduced_clock)
1037 {
1038 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1039 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1040 	u32 fp, fp2;
1041 	int factor;
1042 
1043 	/* Enable autotuning of the PLL clock (if permissible) */
1044 	factor = 21;
1045 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1046 		if ((intel_panel_use_ssc(dev_priv) &&
1047 		     dev_priv->display.vbt.lvds_ssc_freq == 100000) ||
1048 		    (HAS_PCH_IBX(dev_priv) &&
1049 		     intel_is_dual_link_lvds(dev_priv)))
1050 			factor = 25;
1051 	} else if (crtc_state->sdvo_tv_clock) {
1052 		factor = 20;
1053 	}
1054 
1055 	fp = i9xx_dpll_compute_fp(clock);
1056 	if (ilk_needs_fb_cb_tune(clock, factor))
1057 		fp |= FP_CB_TUNE;
1058 
1059 	fp2 = i9xx_dpll_compute_fp(reduced_clock);
1060 	if (ilk_needs_fb_cb_tune(reduced_clock, factor))
1061 		fp2 |= FP_CB_TUNE;
1062 
1063 	crtc_state->dpll_hw_state.fp0 = fp;
1064 	crtc_state->dpll_hw_state.fp1 = fp2;
1065 }
1066 
1067 static void ilk_compute_dpll(struct intel_crtc_state *crtc_state,
1068 			     const struct dpll *clock,
1069 			     const struct dpll *reduced_clock)
1070 {
1071 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1072 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1073 	u32 dpll;
1074 
1075 	ilk_update_pll_dividers(crtc_state, clock, reduced_clock);
1076 
1077 	dpll = 0;
1078 
1079 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
1080 		dpll |= DPLLB_MODE_LVDS;
1081 	else
1082 		dpll |= DPLLB_MODE_DAC_SERIAL;
1083 
1084 	dpll |= (crtc_state->pixel_multiplier - 1)
1085 		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
1086 
1087 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
1088 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1089 		dpll |= DPLL_SDVO_HIGH_SPEED;
1090 
1091 	if (intel_crtc_has_dp_encoder(crtc_state))
1092 		dpll |= DPLL_SDVO_HIGH_SPEED;
1093 
1094 	/*
1095 	 * The high speed IO clock is only really required for
1096 	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
1097 	 * possible to share the DPLL between CRT and HDMI. Enabling
1098 	 * the clock needlessly does no real harm, except potentially
1099 	 * using up a bit of power.
1100 	 *
1101 	 * We'll limit this to IVB with 3 pipes, since it has only two
1102 	 * DPLLs and so DPLL sharing is the only way to get three pipes
1103 	 * driving PCH ports at the same time. On SNB we could do this,
1104 	 * and potentially avoid enabling the second DPLL, but it's not
1105 	 * clear if it's a win or a loss power-wise. No point in doing
1106 	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
1107 	 */
1108 	if (INTEL_NUM_PIPES(dev_priv) == 3 &&
1109 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1110 		dpll |= DPLL_SDVO_HIGH_SPEED;
1111 
1112 	/* compute bitmask from p1 value */
1113 	dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
1114 	/* also FPA1 */
1115 	dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
1116 
1117 	switch (clock->p2) {
1118 	case 5:
1119 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
1120 		break;
1121 	case 7:
1122 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
1123 		break;
1124 	case 10:
1125 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
1126 		break;
1127 	case 14:
1128 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
1129 		break;
1130 	}
1131 	WARN_ON(reduced_clock->p2 != clock->p2);
1132 
1133 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
1134 	    intel_panel_use_ssc(dev_priv))
1135 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
1136 	else
1137 		dpll |= PLL_REF_INPUT_DREFCLK;
1138 
1139 	dpll |= DPLL_VCO_ENABLE;
1140 
1141 	crtc_state->dpll_hw_state.dpll = dpll;
1142 }
1143 
1144 static int ilk_crtc_compute_clock(struct intel_atomic_state *state,
1145 				  struct intel_crtc *crtc)
1146 {
1147 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1148 	struct intel_crtc_state *crtc_state =
1149 		intel_atomic_get_new_crtc_state(state, crtc);
1150 	const struct intel_limit *limit;
1151 	int refclk = 120000;
1152 	int ret;
1153 
1154 	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
1155 	if (!crtc_state->has_pch_encoder)
1156 		return 0;
1157 
1158 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1159 		if (intel_panel_use_ssc(dev_priv)) {
1160 			drm_dbg_kms(&dev_priv->drm,
1161 				    "using SSC reference clock of %d kHz\n",
1162 				    dev_priv->display.vbt.lvds_ssc_freq);
1163 			refclk = dev_priv->display.vbt.lvds_ssc_freq;
1164 		}
1165 
1166 		if (intel_is_dual_link_lvds(dev_priv)) {
1167 			if (refclk == 100000)
1168 				limit = &ilk_limits_dual_lvds_100m;
1169 			else
1170 				limit = &ilk_limits_dual_lvds;
1171 		} else {
1172 			if (refclk == 100000)
1173 				limit = &ilk_limits_single_lvds_100m;
1174 			else
1175 				limit = &ilk_limits_single_lvds;
1176 		}
1177 	} else {
1178 		limit = &ilk_limits_dac;
1179 	}
1180 
1181 	if (!crtc_state->clock_set &&
1182 	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1183 				refclk, NULL, &crtc_state->dpll))
1184 		return -EINVAL;
1185 
1186 	i9xx_calc_dpll_params(refclk, &crtc_state->dpll);
1187 
1188 	ilk_compute_dpll(crtc_state, &crtc_state->dpll,
1189 			 &crtc_state->dpll);
1190 
1191 	ret = intel_compute_shared_dplls(state, crtc, NULL);
1192 	if (ret)
1193 		return ret;
1194 
1195 	crtc_state->port_clock = crtc_state->dpll.dot;
1196 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1197 
1198 	return ret;
1199 }
1200 
1201 static int ilk_crtc_get_shared_dpll(struct intel_atomic_state *state,
1202 				    struct intel_crtc *crtc)
1203 {
1204 	struct intel_crtc_state *crtc_state =
1205 		intel_atomic_get_new_crtc_state(state, crtc);
1206 
1207 	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
1208 	if (!crtc_state->has_pch_encoder)
1209 		return 0;
1210 
1211 	return intel_reserve_shared_dplls(state, crtc, NULL);
1212 }
1213 
1214 void vlv_compute_dpll(struct intel_crtc_state *crtc_state)
1215 {
1216 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1217 
1218 	crtc_state->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
1219 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1220 	if (crtc->pipe != PIPE_A)
1221 		crtc_state->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
1222 
1223 	/* DPLL not used with DSI, but still need the rest set up */
1224 	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1225 		crtc_state->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
1226 			DPLL_EXT_BUFFER_ENABLE_VLV;
1227 
1228 	crtc_state->dpll_hw_state.dpll_md =
1229 		(crtc_state->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
1230 }
1231 
1232 void chv_compute_dpll(struct intel_crtc_state *crtc_state)
1233 {
1234 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1235 
1236 	crtc_state->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
1237 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1238 	if (crtc->pipe != PIPE_A)
1239 		crtc_state->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
1240 
1241 	/* DPLL not used with DSI, but still need the rest set up */
1242 	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1243 		crtc_state->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
1244 
1245 	crtc_state->dpll_hw_state.dpll_md =
1246 		(crtc_state->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
1247 }
1248 
1249 static int chv_crtc_compute_clock(struct intel_atomic_state *state,
1250 				  struct intel_crtc *crtc)
1251 {
1252 	struct intel_crtc_state *crtc_state =
1253 		intel_atomic_get_new_crtc_state(state, crtc);
1254 	const struct intel_limit *limit = &intel_limits_chv;
1255 	int refclk = 100000;
1256 
1257 	if (!crtc_state->clock_set &&
1258 	    !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1259 				refclk, NULL, &crtc_state->dpll))
1260 		return -EINVAL;
1261 
1262 	chv_calc_dpll_params(refclk, &crtc_state->dpll);
1263 
1264 	chv_compute_dpll(crtc_state);
1265 
1266 	/* FIXME this is a mess */
1267 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1268 		return 0;
1269 
1270 	crtc_state->port_clock = crtc_state->dpll.dot;
1271 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1272 
1273 	return 0;
1274 }
1275 
1276 static int vlv_crtc_compute_clock(struct intel_atomic_state *state,
1277 				  struct intel_crtc *crtc)
1278 {
1279 	struct intel_crtc_state *crtc_state =
1280 		intel_atomic_get_new_crtc_state(state, crtc);
1281 	const struct intel_limit *limit = &intel_limits_vlv;
1282 	int refclk = 100000;
1283 
1284 	if (!crtc_state->clock_set &&
1285 	    !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1286 				refclk, NULL, &crtc_state->dpll))
1287 		return -EINVAL;
1288 
1289 	vlv_calc_dpll_params(refclk, &crtc_state->dpll);
1290 
1291 	vlv_compute_dpll(crtc_state);
1292 
1293 	/* FIXME this is a mess */
1294 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1295 		return 0;
1296 
1297 	crtc_state->port_clock = crtc_state->dpll.dot;
1298 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1299 
1300 	return 0;
1301 }
1302 
1303 static int g4x_crtc_compute_clock(struct intel_atomic_state *state,
1304 				  struct intel_crtc *crtc)
1305 {
1306 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1307 	struct intel_crtc_state *crtc_state =
1308 		intel_atomic_get_new_crtc_state(state, crtc);
1309 	const struct intel_limit *limit;
1310 	int refclk = 96000;
1311 
1312 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1313 		if (intel_panel_use_ssc(dev_priv)) {
1314 			refclk = dev_priv->display.vbt.lvds_ssc_freq;
1315 			drm_dbg_kms(&dev_priv->drm,
1316 				    "using SSC reference clock of %d kHz\n",
1317 				    refclk);
1318 		}
1319 
1320 		if (intel_is_dual_link_lvds(dev_priv))
1321 			limit = &intel_limits_g4x_dual_channel_lvds;
1322 		else
1323 			limit = &intel_limits_g4x_single_channel_lvds;
1324 	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
1325 		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
1326 		limit = &intel_limits_g4x_hdmi;
1327 	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
1328 		limit = &intel_limits_g4x_sdvo;
1329 	} else {
1330 		/* Use the i9xx limits for all other output types */
1331 		limit = &intel_limits_i9xx_sdvo;
1332 	}
1333 
1334 	if (!crtc_state->clock_set &&
1335 	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1336 				refclk, NULL, &crtc_state->dpll))
1337 		return -EINVAL;
1338 
1339 	i9xx_calc_dpll_params(refclk, &crtc_state->dpll);
1340 
1341 	i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
1342 			  &crtc_state->dpll);
1343 
1344 	crtc_state->port_clock = crtc_state->dpll.dot;
1345 	/* FIXME this is a mess */
1346 	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_TVOUT))
1347 		crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1348 
1349 	return 0;
1350 }
1351 
1352 static int pnv_crtc_compute_clock(struct intel_atomic_state *state,
1353 				  struct intel_crtc *crtc)
1354 {
1355 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1356 	struct intel_crtc_state *crtc_state =
1357 		intel_atomic_get_new_crtc_state(state, crtc);
1358 	const struct intel_limit *limit;
1359 	int refclk = 96000;
1360 
1361 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1362 		if (intel_panel_use_ssc(dev_priv)) {
1363 			refclk = dev_priv->display.vbt.lvds_ssc_freq;
1364 			drm_dbg_kms(&dev_priv->drm,
1365 				    "using SSC reference clock of %d kHz\n",
1366 				    refclk);
1367 		}
1368 
1369 		limit = &pnv_limits_lvds;
1370 	} else {
1371 		limit = &pnv_limits_sdvo;
1372 	}
1373 
1374 	if (!crtc_state->clock_set &&
1375 	    !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1376 				refclk, NULL, &crtc_state->dpll))
1377 		return -EINVAL;
1378 
1379 	pnv_calc_dpll_params(refclk, &crtc_state->dpll);
1380 
1381 	i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
1382 			  &crtc_state->dpll);
1383 
1384 	crtc_state->port_clock = crtc_state->dpll.dot;
1385 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1386 
1387 	return 0;
1388 }
1389 
1390 static int i9xx_crtc_compute_clock(struct intel_atomic_state *state,
1391 				   struct intel_crtc *crtc)
1392 {
1393 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1394 	struct intel_crtc_state *crtc_state =
1395 		intel_atomic_get_new_crtc_state(state, crtc);
1396 	const struct intel_limit *limit;
1397 	int refclk = 96000;
1398 
1399 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1400 		if (intel_panel_use_ssc(dev_priv)) {
1401 			refclk = dev_priv->display.vbt.lvds_ssc_freq;
1402 			drm_dbg_kms(&dev_priv->drm,
1403 				    "using SSC reference clock of %d kHz\n",
1404 				    refclk);
1405 		}
1406 
1407 		limit = &intel_limits_i9xx_lvds;
1408 	} else {
1409 		limit = &intel_limits_i9xx_sdvo;
1410 	}
1411 
1412 	if (!crtc_state->clock_set &&
1413 	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1414 				 refclk, NULL, &crtc_state->dpll))
1415 		return -EINVAL;
1416 
1417 	i9xx_calc_dpll_params(refclk, &crtc_state->dpll);
1418 
1419 	i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
1420 			  &crtc_state->dpll);
1421 
1422 	crtc_state->port_clock = crtc_state->dpll.dot;
1423 	/* FIXME this is a mess */
1424 	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_TVOUT))
1425 		crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1426 
1427 	return 0;
1428 }
1429 
1430 static int i8xx_crtc_compute_clock(struct intel_atomic_state *state,
1431 				   struct intel_crtc *crtc)
1432 {
1433 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1434 	struct intel_crtc_state *crtc_state =
1435 		intel_atomic_get_new_crtc_state(state, crtc);
1436 	const struct intel_limit *limit;
1437 	int refclk = 48000;
1438 
1439 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1440 		if (intel_panel_use_ssc(dev_priv)) {
1441 			refclk = dev_priv->display.vbt.lvds_ssc_freq;
1442 			drm_dbg_kms(&dev_priv->drm,
1443 				    "using SSC reference clock of %d kHz\n",
1444 				    refclk);
1445 		}
1446 
1447 		limit = &intel_limits_i8xx_lvds;
1448 	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
1449 		limit = &intel_limits_i8xx_dvo;
1450 	} else {
1451 		limit = &intel_limits_i8xx_dac;
1452 	}
1453 
1454 	if (!crtc_state->clock_set &&
1455 	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1456 				 refclk, NULL, &crtc_state->dpll))
1457 		return -EINVAL;
1458 
1459 	i9xx_calc_dpll_params(refclk, &crtc_state->dpll);
1460 
1461 	i8xx_compute_dpll(crtc_state, &crtc_state->dpll,
1462 			  &crtc_state->dpll);
1463 
1464 	crtc_state->port_clock = crtc_state->dpll.dot;
1465 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1466 
1467 	return 0;
1468 }
1469 
1470 static const struct intel_dpll_funcs mtl_dpll_funcs = {
1471 	.crtc_compute_clock = mtl_crtc_compute_clock,
1472 };
1473 
1474 static const struct intel_dpll_funcs dg2_dpll_funcs = {
1475 	.crtc_compute_clock = dg2_crtc_compute_clock,
1476 };
1477 
1478 static const struct intel_dpll_funcs hsw_dpll_funcs = {
1479 	.crtc_compute_clock = hsw_crtc_compute_clock,
1480 	.crtc_get_shared_dpll = hsw_crtc_get_shared_dpll,
1481 };
1482 
1483 static const struct intel_dpll_funcs ilk_dpll_funcs = {
1484 	.crtc_compute_clock = ilk_crtc_compute_clock,
1485 	.crtc_get_shared_dpll = ilk_crtc_get_shared_dpll,
1486 };
1487 
1488 static const struct intel_dpll_funcs chv_dpll_funcs = {
1489 	.crtc_compute_clock = chv_crtc_compute_clock,
1490 };
1491 
1492 static const struct intel_dpll_funcs vlv_dpll_funcs = {
1493 	.crtc_compute_clock = vlv_crtc_compute_clock,
1494 };
1495 
1496 static const struct intel_dpll_funcs g4x_dpll_funcs = {
1497 	.crtc_compute_clock = g4x_crtc_compute_clock,
1498 };
1499 
1500 static const struct intel_dpll_funcs pnv_dpll_funcs = {
1501 	.crtc_compute_clock = pnv_crtc_compute_clock,
1502 };
1503 
1504 static const struct intel_dpll_funcs i9xx_dpll_funcs = {
1505 	.crtc_compute_clock = i9xx_crtc_compute_clock,
1506 };
1507 
1508 static const struct intel_dpll_funcs i8xx_dpll_funcs = {
1509 	.crtc_compute_clock = i8xx_crtc_compute_clock,
1510 };
1511 
1512 int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
1513 				  struct intel_crtc *crtc)
1514 {
1515 	struct drm_i915_private *i915 = to_i915(state->base.dev);
1516 	struct intel_crtc_state *crtc_state =
1517 		intel_atomic_get_new_crtc_state(state, crtc);
1518 	int ret;
1519 
1520 	drm_WARN_ON(&i915->drm, !intel_crtc_needs_modeset(crtc_state));
1521 
1522 	memset(&crtc_state->dpll_hw_state, 0,
1523 	       sizeof(crtc_state->dpll_hw_state));
1524 
1525 	if (!crtc_state->hw.enable)
1526 		return 0;
1527 
1528 	ret = i915->display.funcs.dpll->crtc_compute_clock(state, crtc);
1529 	if (ret) {
1530 		drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Couldn't calculate DPLL settings\n",
1531 			    crtc->base.base.id, crtc->base.name);
1532 		return ret;
1533 	}
1534 
1535 	return 0;
1536 }
1537 
1538 int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
1539 				    struct intel_crtc *crtc)
1540 {
1541 	struct drm_i915_private *i915 = to_i915(state->base.dev);
1542 	struct intel_crtc_state *crtc_state =
1543 		intel_atomic_get_new_crtc_state(state, crtc);
1544 	int ret;
1545 
1546 	drm_WARN_ON(&i915->drm, !intel_crtc_needs_modeset(crtc_state));
1547 	drm_WARN_ON(&i915->drm, !crtc_state->hw.enable && crtc_state->shared_dpll);
1548 
1549 	if (!crtc_state->hw.enable || crtc_state->shared_dpll)
1550 		return 0;
1551 
1552 	if (!i915->display.funcs.dpll->crtc_get_shared_dpll)
1553 		return 0;
1554 
1555 	ret = i915->display.funcs.dpll->crtc_get_shared_dpll(state, crtc);
1556 	if (ret) {
1557 		drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Couldn't get a shared DPLL\n",
1558 			    crtc->base.base.id, crtc->base.name);
1559 		return ret;
1560 	}
1561 
1562 	return 0;
1563 }
1564 
1565 void
1566 intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv)
1567 {
1568 	if (DISPLAY_VER(dev_priv) >= 14)
1569 		dev_priv->display.funcs.dpll = &mtl_dpll_funcs;
1570 	else if (IS_DG2(dev_priv))
1571 		dev_priv->display.funcs.dpll = &dg2_dpll_funcs;
1572 	else if (DISPLAY_VER(dev_priv) >= 9 || HAS_DDI(dev_priv))
1573 		dev_priv->display.funcs.dpll = &hsw_dpll_funcs;
1574 	else if (HAS_PCH_SPLIT(dev_priv))
1575 		dev_priv->display.funcs.dpll = &ilk_dpll_funcs;
1576 	else if (IS_CHERRYVIEW(dev_priv))
1577 		dev_priv->display.funcs.dpll = &chv_dpll_funcs;
1578 	else if (IS_VALLEYVIEW(dev_priv))
1579 		dev_priv->display.funcs.dpll = &vlv_dpll_funcs;
1580 	else if (IS_G4X(dev_priv))
1581 		dev_priv->display.funcs.dpll = &g4x_dpll_funcs;
1582 	else if (IS_PINEVIEW(dev_priv))
1583 		dev_priv->display.funcs.dpll = &pnv_dpll_funcs;
1584 	else if (DISPLAY_VER(dev_priv) != 2)
1585 		dev_priv->display.funcs.dpll = &i9xx_dpll_funcs;
1586 	else
1587 		dev_priv->display.funcs.dpll = &i8xx_dpll_funcs;
1588 }
1589 
1590 static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
1591 {
1592 	if (IS_I830(dev_priv))
1593 		return false;
1594 
1595 	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
1596 }
1597 
1598 void i9xx_enable_pll(const struct intel_crtc_state *crtc_state)
1599 {
1600 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1601 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1602 	u32 dpll = crtc_state->dpll_hw_state.dpll;
1603 	enum pipe pipe = crtc->pipe;
1604 	int i;
1605 
1606 	assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
1607 
1608 	/* PLL is protected by panel, make sure we can write it */
1609 	if (i9xx_has_pps(dev_priv))
1610 		assert_pps_unlocked(dev_priv, pipe);
1611 
1612 	intel_de_write(dev_priv, FP0(pipe), crtc_state->dpll_hw_state.fp0);
1613 	intel_de_write(dev_priv, FP1(pipe), crtc_state->dpll_hw_state.fp1);
1614 
1615 	/*
1616 	 * Apparently we need to have VGA mode enabled prior to changing
1617 	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
1618 	 * dividers, even though the register value does change.
1619 	 */
1620 	intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
1621 	intel_de_write(dev_priv, DPLL(pipe), dpll);
1622 
1623 	/* Wait for the clocks to stabilize. */
1624 	intel_de_posting_read(dev_priv, DPLL(pipe));
1625 	udelay(150);
1626 
1627 	if (DISPLAY_VER(dev_priv) >= 4) {
1628 		intel_de_write(dev_priv, DPLL_MD(pipe),
1629 			       crtc_state->dpll_hw_state.dpll_md);
1630 	} else {
1631 		/* The pixel multiplier can only be updated once the
1632 		 * DPLL is enabled and the clocks are stable.
1633 		 *
1634 		 * So write it again.
1635 		 */
1636 		intel_de_write(dev_priv, DPLL(pipe), dpll);
1637 	}
1638 
1639 	/* We do this three times for luck */
1640 	for (i = 0; i < 3; i++) {
1641 		intel_de_write(dev_priv, DPLL(pipe), dpll);
1642 		intel_de_posting_read(dev_priv, DPLL(pipe));
1643 		udelay(150); /* wait for warmup */
1644 	}
1645 }
1646 
1647 static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv,
1648 				 enum pipe pipe)
1649 {
1650 	u32 reg_val;
1651 
1652 	/*
1653 	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
1654 	 * and set it to a reasonable value instead.
1655 	 */
1656 	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
1657 	reg_val &= 0xffffff00;
1658 	reg_val |= 0x00000030;
1659 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
1660 
1661 	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
1662 	reg_val &= 0x00ffffff;
1663 	reg_val |= 0x8c000000;
1664 	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
1665 
1666 	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
1667 	reg_val &= 0xffffff00;
1668 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
1669 
1670 	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
1671 	reg_val &= 0x00ffffff;
1672 	reg_val |= 0xb0000000;
1673 	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
1674 }
1675 
1676 static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state)
1677 {
1678 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1679 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1680 	enum pipe pipe = crtc->pipe;
1681 	u32 mdiv;
1682 	u32 bestn, bestm1, bestm2, bestp1, bestp2;
1683 	u32 coreclk, reg_val;
1684 
1685 	vlv_dpio_get(dev_priv);
1686 
1687 	bestn = crtc_state->dpll.n;
1688 	bestm1 = crtc_state->dpll.m1;
1689 	bestm2 = crtc_state->dpll.m2;
1690 	bestp1 = crtc_state->dpll.p1;
1691 	bestp2 = crtc_state->dpll.p2;
1692 
1693 	/* See eDP HDMI DPIO driver vbios notes doc */
1694 
1695 	/* PLL B needs special handling */
1696 	if (pipe == PIPE_B)
1697 		vlv_pllb_recal_opamp(dev_priv, pipe);
1698 
1699 	/* Set up Tx target for periodic Rcomp update */
1700 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
1701 
1702 	/* Disable target IRef on PLL */
1703 	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
1704 	reg_val &= 0x00ffffff;
1705 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
1706 
1707 	/* Disable fast lock */
1708 	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
1709 
1710 	/* Set idtafcrecal before PLL is enabled */
1711 	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
1712 	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
1713 	mdiv |= ((bestn << DPIO_N_SHIFT));
1714 	mdiv |= (1 << DPIO_K_SHIFT);
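	/*
	 * For reference: with the 100 MHz reference clock, VCO = refclk *
	 * (m1 * m2) / n and dot clock = VCO / (p1 * p2).  Illustrative
	 * numbers (not taken from any particular mode): n=1, m1=2, m2=27,
	 * p1=3, p2=10 gives a 5.4 GHz VCO and a 180 MHz dot clock, both
	 * within the VLV limits.
	 */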
1715 
1716 	/*
1717 	 * The post divider depends on the pixel clock rate and on DAC vs.
1718 	 * digital output (and on LVDS, but we don't support that here).
1719 	 * Note: don't use the DAC post divider as it seems unstable.
1720 	 */
1721 	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
1722 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
1723 
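	/*
	 * The dividers are written once without DPIO_ENABLE_CALIBRATION and
	 * then again with it set, presumably so that calibration only starts
	 * after the final divider values are already in place.
	 */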
1724 	mdiv |= DPIO_ENABLE_CALIBRATION;
1725 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
1726 
1727 	/* Set HBR and RBR LPF coefficients */
1728 	if (crtc_state->port_clock == 162000 ||
1729 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG) ||
1730 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1731 		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
1732 				 0x009f0003);
1733 	else
1734 		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
1735 				 0x00d0000f);
1736 
1737 	if (intel_crtc_has_dp_encoder(crtc_state)) {
1738 		/* Use SSC source */
1739 		if (pipe == PIPE_A)
1740 			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
1741 					 0x0df40000);
1742 		else
1743 			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
1744 					 0x0df70000);
1745 	} else { /* HDMI or VGA */
1746 		/* Use bend source */
1747 		if (pipe == PIPE_A)
1748 			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
1749 					 0x0df70000);
1750 		else
1751 			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
1752 					 0x0df40000);
1753 	}
1754 
1755 	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
1756 	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
1757 	if (intel_crtc_has_dp_encoder(crtc_state))
1758 		coreclk |= 0x01000000;
1759 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
1760 
1761 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
1762 
1763 	vlv_dpio_put(dev_priv);
1764 }
1765 
1766 static void _vlv_enable_pll(const struct intel_crtc_state *crtc_state)
1767 {
1768 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1769 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1770 	enum pipe pipe = crtc->pipe;
1771 
1772 	intel_de_write(dev_priv, DPLL(pipe), crtc_state->dpll_hw_state.dpll);
1773 	intel_de_posting_read(dev_priv, DPLL(pipe));
1774 	udelay(150);
1775 
1776 	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
1777 		drm_err(&dev_priv->drm, "DPLL %d failed to lock\n", pipe);
1778 }
1779 
1780 void vlv_enable_pll(const struct intel_crtc_state *crtc_state)
1781 {
1782 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1783 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1784 	enum pipe pipe = crtc->pipe;
1785 
1786 	assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
1787 
1788 	/* The PLL is protected by the panel power sequencer, make sure we can write it */
1789 	assert_pps_unlocked(dev_priv, pipe);
1790 
1791 	/* Enable Refclk */
1792 	intel_de_write(dev_priv, DPLL(pipe),
1793 		       crtc_state->dpll_hw_state.dpll &
1794 		       ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
1795 
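	/*
	 * Only program and lock the PLL if the precomputed state actually
	 * asks for the VCO; otherwise just the reference clock written above
	 * is left enabled.
	 */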
1796 	if (crtc_state->dpll_hw_state.dpll & DPLL_VCO_ENABLE) {
1797 		vlv_prepare_pll(crtc_state);
1798 		_vlv_enable_pll(crtc_state);
1799 	}
1800 
1801 	intel_de_write(dev_priv, DPLL_MD(pipe),
1802 		       crtc_state->dpll_hw_state.dpll_md);
1803 	intel_de_posting_read(dev_priv, DPLL_MD(pipe));
1804 }
1805 
1806 static void chv_prepare_pll(const struct intel_crtc_state *crtc_state)
1807 {
1808 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1809 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1810 	enum pipe pipe = crtc->pipe;
1811 	enum dpio_channel port = vlv_pipe_to_channel(pipe);
1812 	u32 loopfilter, tribuf_calcntr;
1813 	u32 bestm2, bestp1, bestp2, bestm2_frac;
1814 	u32 dpio_val;
1815 	int vco;
1816 
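	/*
	 * On CHV the m2 divider is a fixed-point value with 22 fractional
	 * bits: the integer part goes to CHV_PLL_DW0 and the fraction to
	 * CHV_PLL_DW2 below (roughly, VCO = refclk * m1 * m2 / (n << 22)).
	 */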
1817 	bestm2_frac = crtc_state->dpll.m2 & 0x3fffff;
1818 	bestm2 = crtc_state->dpll.m2 >> 22;
1819 	bestp1 = crtc_state->dpll.p1;
1820 	bestp2 = crtc_state->dpll.p2;
1821 	vco = crtc_state->dpll.vco;
1822 	dpio_val = 0;
1823 	loopfilter = 0;
1824 
1825 	vlv_dpio_get(dev_priv);
1826 
1827 	/* p1 and p2 divider */
1828 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
1829 			5 << DPIO_CHV_S1_DIV_SHIFT |
1830 			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
1831 			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
1832 			1 << DPIO_CHV_K_DIV_SHIFT);
1833 
1834 	/* Feedback post-divider - m2 */
1835 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
1836 
1837 	/* Feedback refclk divider - n and m1 */
1838 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
1839 			DPIO_CHV_M1_DIV_BY_2 |
1840 			1 << DPIO_CHV_N_DIV_SHIFT);
1841 
1842 	/* M2 fraction division */
1843 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
1844 
1845 	/* M2 fraction division enable */
1846 	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
1847 	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
1848 	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
1849 	if (bestm2_frac)
1850 		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
1851 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
1852 
1853 	/* Program digital lock detect threshold */
1854 	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
1855 	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
1856 					DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
1857 	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
1858 	if (!bestm2_frac)
1859 		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
1860 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
1861 
1862 	/* Loop filter */
1863 	if (vco == 5400000) {
1864 		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
1865 		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
1866 		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
1867 		tribuf_calcntr = 0x9;
1868 	} else if (vco <= 6200000) {
1869 		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
1870 		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
1871 		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
1872 		tribuf_calcntr = 0x9;
1873 	} else if (vco <= 6480000) {
1874 		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
1875 		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
1876 		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
1877 		tribuf_calcntr = 0x8;
1878 	} else {
1879 		/* Not supported. Apply the same settings as in the highest supported VCO case. */
1880 		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
1881 		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
1882 		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
1883 		tribuf_calcntr = 0;
1884 	}
1885 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
1886 
1887 	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
1888 	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
1889 	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
1890 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
1891 
1892 	/* AFC Recal */
1893 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
1894 			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
1895 			DPIO_AFC_RECAL);
1896 
1897 	vlv_dpio_put(dev_priv);
1898 }
1899 
1900 static void _chv_enable_pll(const struct intel_crtc_state *crtc_state)
1901 {
1902 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1903 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1904 	enum pipe pipe = crtc->pipe;
1905 	enum dpio_channel port = vlv_pipe_to_channel(pipe);
1906 	u32 tmp;
1907 
1908 	vlv_dpio_get(dev_priv);
1909 
1910 	/* Re-enable the 10-bit clock to the display controller */
1911 	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1912 	tmp |= DPIO_DCLKP_EN;
1913 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
1914 
1915 	vlv_dpio_put(dev_priv);
1916 
1917 	/*
1918 	 * Need to wait > 100 ns between setting the dclkp clock enable bit and enabling the PLL.
1919 	 */
1920 	udelay(1); /* 1 usec is comfortably more than the required 100 ns */
1921 
1922 	/* Enable PLL */
1923 	intel_de_write(dev_priv, DPLL(pipe), crtc_state->dpll_hw_state.dpll);
1924 
1925 	/* Check PLL is locked */
1926 	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
1927 		drm_err(&dev_priv->drm, "PLL %d failed to lock\n", pipe);
1928 }
1929 
1930 void chv_enable_pll(const struct intel_crtc_state *crtc_state)
1931 {
1932 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1933 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1934 	enum pipe pipe = crtc->pipe;
1935 
1936 	assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
1937 
1938 	/* The PLL is protected by the panel power sequencer, make sure we can write it */
1939 	assert_pps_unlocked(dev_priv, pipe);
1940 
1941 	/* Enable Refclk and SSC */
1942 	intel_de_write(dev_priv, DPLL(pipe),
1943 		       crtc_state->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
1944 
1945 	if (crtc_state->dpll_hw_state.dpll & DPLL_VCO_ENABLE) {
1946 		chv_prepare_pll(crtc_state);
1947 		_chv_enable_pll(crtc_state);
1948 	}
1949 
1950 	if (pipe != PIPE_A) {
1951 		/*
1952 		 * WaPixelRepeatModeFixForC0:chv
1953 		 *
1954 		 * DPLLCMD is AWOL. Use chicken bits to propagate
1955 		 * The DPLL MD register for pipe C is AWOL, so use the chicken
1956 		 * bits to propagate the value written to DPLL B MD to either pipe B or C.
1957 		intel_de_write(dev_priv, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
1958 		intel_de_write(dev_priv, DPLL_MD(PIPE_B),
1959 			       crtc_state->dpll_hw_state.dpll_md);
1960 		intel_de_write(dev_priv, CBR4_VLV, 0);
1961 		dev_priv->display.state.chv_dpll_md[pipe] = crtc_state->dpll_hw_state.dpll_md;
1962 
1963 		/*
1964 		 * DPLLB VGA mode also seems to cause problems.
1965 		 * We should always have it disabled.
1966 		 */
1967 		drm_WARN_ON(&dev_priv->drm,
1968 			    (intel_de_read(dev_priv, DPLL(PIPE_B)) &
1969 			     DPLL_VGA_MODE_DIS) == 0);
1970 	} else {
1971 		intel_de_write(dev_priv, DPLL_MD(pipe),
1972 			       crtc_state->dpll_hw_state.dpll_md);
1973 		intel_de_posting_read(dev_priv, DPLL_MD(pipe));
1974 	}
1975 }
1976 
1977 /**
1978  * vlv_force_pll_on - forcibly enable just the PLL
1979  * @dev_priv: i915 private structure
1980  * @pipe: pipe PLL to enable
1981  * @dpll: PLL configuration
1982  *
1983  * Enable the PLL for @pipe using the supplied @dpll config. To be used
1984  * in cases where we need the PLL enabled even when @pipe is not going to
1985  * be enabled.
1986  */
1987 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
1988 		     const struct dpll *dpll)
1989 {
1990 	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
1991 	struct intel_crtc_state *crtc_state;
1992 
1993 	crtc_state = intel_crtc_state_alloc(crtc);
1994 	if (!crtc_state)
1995 		return -ENOMEM;
1996 
1997 	crtc_state->cpu_transcoder = (enum transcoder)pipe;
1998 	crtc_state->pixel_multiplier = 1;
1999 	crtc_state->dpll = *dpll;
2000 	crtc_state->output_types = BIT(INTEL_OUTPUT_EDP);
2001 
2002 	if (IS_CHERRYVIEW(dev_priv)) {
2003 		chv_compute_dpll(crtc_state);
2004 		chv_enable_pll(crtc_state);
2005 	} else {
2006 		vlv_compute_dpll(crtc_state);
2007 		vlv_enable_pll(crtc_state);
2008 	}
2009 
2010 	intel_crtc_destroy_state(&crtc->base, &crtc_state->uapi);
2011 
2012 	return 0;
2013 }
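/*
 * Illustrative usage sketch (not taken from an actual caller): code that only
 * needs a running PLL, without enabling the pipe itself, pairs this with
 * vlv_force_pll_off() below:
 *
 *	const struct dpll clock = { .n = 1, .m1 = 2, .m2 = 27, .p1 = 3, .p2 = 10 };
 *
 *	if (vlv_force_pll_on(dev_priv, PIPE_A, &clock) == 0) {
 *		... poke at hardware that needs the pipe A PLL running ...
 *		vlv_force_pll_off(dev_priv, PIPE_A);
 *	}
 */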
2014 
2015 void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
2016 {
2017 	u32 val;
2018 
2019 	/* Make sure the pipe isn't still relying on us */
2020 	assert_transcoder_disabled(dev_priv, (enum transcoder)pipe);
2021 
2022 	val = DPLL_INTEGRATED_REF_CLK_VLV |
2023 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
2024 	if (pipe != PIPE_A)
2025 		val |= DPLL_INTEGRATED_CRI_CLK_VLV;
2026 
2027 	intel_de_write(dev_priv, DPLL(pipe), val);
2028 	intel_de_posting_read(dev_priv, DPLL(pipe));
2029 }
2030 
2031 void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
2032 {
2033 	enum dpio_channel port = vlv_pipe_to_channel(pipe);
2034 	u32 val;
2035 
2036 	/* Make sure the pipe isn't still relying on us */
2037 	assert_transcoder_disabled(dev_priv, (enum transcoder)pipe);
2038 
2039 	val = DPLL_SSC_REF_CLK_CHV |
2040 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
2041 	if (pipe != PIPE_A)
2042 		val |= DPLL_INTEGRATED_CRI_CLK_VLV;
2043 
2044 	intel_de_write(dev_priv, DPLL(pipe), val);
2045 	intel_de_posting_read(dev_priv, DPLL(pipe));
2046 
2047 	vlv_dpio_get(dev_priv);
2048 
2049 	/* Disable the 10-bit clock to the display controller */
2050 	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
2051 	val &= ~DPIO_DCLKP_EN;
2052 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
2053 
2054 	vlv_dpio_put(dev_priv);
2055 }
2056 
2057 void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
2058 {
2059 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2060 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2061 	enum pipe pipe = crtc->pipe;
2062 
2063 	/* I830 needs the pipe and pipe PLLs left enabled, so don't disable them here */
2064 	if (IS_I830(dev_priv))
2065 		return;
2066 
2067 	/* Make sure the pipe isn't still relying on us */
2068 	assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
2069 
2070 	intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
2071 	intel_de_posting_read(dev_priv, DPLL(pipe));
2072 }
2073 
2074 
2075 /**
2076  * vlv_force_pll_off - forcibly disable just the PLL
2077  * @dev_priv: i915 private structure
2078  * @pipe: pipe PLL to disable
2079  *
2080  * Disable the PLL for @pipe. Counterpart to vlv_force_pll_on(); to be
2081  * used in cases where the PLL was enabled without @pipe itself being enabled.
2082  */
2083 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
2084 {
2085 	if (IS_CHERRYVIEW(dev_priv))
2086 		chv_disable_pll(dev_priv, pipe);
2087 	else
2088 		vlv_disable_pll(dev_priv, pipe);
2089 }
2090 
2091 /* Only for pre-ILK configs */
2092 static void assert_pll(struct drm_i915_private *dev_priv,
2093 		       enum pipe pipe, bool state)
2094 {
2095 	bool cur_state;
2096 
2097 	cur_state = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE;
2098 	I915_STATE_WARN(dev_priv, cur_state != state,
2099 			"PLL state assertion failure (expected %s, current %s)\n",
2100 			str_on_off(state), str_on_off(cur_state));
2101 }
2102 
2103 void assert_pll_enabled(struct drm_i915_private *i915, enum pipe pipe)
2104 {
2105 	assert_pll(i915, pipe, true);
2106 }
2107 
2108 void assert_pll_disabled(struct drm_i915_private *i915, enum pipe pipe)
2109 {
2110 	assert_pll(i915, pipe, false);
2111 }
2112