1 /*
2  * Copyright © 2006-2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include "intel_display_types.h"
25 #include "intel_dpio_phy.h"
26 #include "intel_dpll_mgr.h"
27 
28 /**
29  * DOC: Display PLLs
30  *
31  * Display PLLs used for driving outputs vary by platform. While some have
32  * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
33  * from a pool. In the latter scenario, it is possible that multiple pipes
34  * share a PLL if their configurations match.
35  *
36  * This file provides an abstraction over display PLLs. The function
37  * intel_shared_dpll_init() initializes the PLLs for the given platform.  The
38  * users of a PLL are tracked and that tracking is integrated with the atomic
39  * modeset interface. During an atomic operation, required PLLs can be reserved
40  * for a given CRTC and encoder configuration by calling
41  * intel_reserve_shared_dplls() and previously reserved PLLs can be released
42  * with intel_release_shared_dplls().
43  * Changes to the users are first staged in the atomic state, and then made
44  * effective by calling intel_shared_dpll_swap_state() during the atomic
45  * commit phase.
46  */
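/*
 * Illustrative sketch of the intended call flow (not a buildable example;
 * locking and error handling omitted):
 *
 *   atomic check phase:
 *     intel_reserve_shared_dplls(state, crtc, encoder);
 *
 *   atomic commit phase:
 *     intel_shared_dpll_swap_state(state);
 *     intel_enable_shared_dpll(crtc_state);
 *     ...
 *     intel_disable_shared_dpll(crtc_state);
 *
 *   and, once the CRTC no longer needs the PLL:
 *     intel_release_shared_dplls(state, crtc);
 */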
47 
48 static void
49 intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
50 				  struct intel_shared_dpll_state *shared_dpll)
51 {
52 	enum intel_dpll_id i;
53 
54 	/* Copy shared dpll state */
55 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
56 		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
57 
58 		shared_dpll[i] = pll->state;
59 	}
60 }
61 
62 static struct intel_shared_dpll_state *
63 intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
64 {
65 	struct intel_atomic_state *state = to_intel_atomic_state(s);
66 
67 	WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
68 
69 	if (!state->dpll_set) {
70 		state->dpll_set = true;
71 
72 		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
73 						  state->shared_dpll);
74 	}
75 
76 	return state->shared_dpll;
77 }
78 
79 /**
80  * intel_get_shared_dpll_by_id - get a DPLL given its id
81  * @dev_priv: i915 device instance
82  * @id: pll id
83  *
84  * Returns:
85  * A pointer to the DPLL with @id
86  */
87 struct intel_shared_dpll *
88 intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
89 			    enum intel_dpll_id id)
90 {
91 	return &dev_priv->shared_dplls[id];
92 }
93 
94 /**
95  * intel_get_shared_dpll_id - get the id of a DPLL
96  * @dev_priv: i915 device instance
97  * @pll: the DPLL
98  *
99  * Returns:
100  * The id of @pll
101  */
102 enum intel_dpll_id
103 intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
104 			 struct intel_shared_dpll *pll)
105 {
106 	if (WARN_ON(pll < dev_priv->shared_dplls ||
107 		    pll >= &dev_priv->shared_dplls[dev_priv->num_shared_dpll]))
108 		return -1;
109 
110 	return (enum intel_dpll_id) (pll - dev_priv->shared_dplls);
111 }
112 
113 /* For ILK+ */
114 void assert_shared_dpll(struct drm_i915_private *dev_priv,
115 			struct intel_shared_dpll *pll,
116 			bool state)
117 {
118 	bool cur_state;
119 	struct intel_dpll_hw_state hw_state;
120 
121 	if (WARN(!pll, "asserting DPLL %s with no DPLL\n", onoff(state)))
122 		return;
123 
124 	cur_state = pll->info->funcs->get_hw_state(dev_priv, pll, &hw_state);
125 	I915_STATE_WARN(cur_state != state,
126 			"%s assertion failure (expected %s, current %s)\n",
127 			pll->info->name, onoff(state), onoff(cur_state));
128 }
129 
130 /**
131  * intel_prepare_shared_dpll - call a dpll's prepare hook
132  * @crtc_state: CRTC, and its state, which has a shared dpll
133  *
134  * This calls the PLL's prepare hook if it has one and if the PLL is not
135  * already enabled. The prepare hook is platform specific.
136  */
137 void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
138 {
139 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
140 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
141 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
142 
143 	if (WARN_ON(pll == NULL))
144 		return;
145 
146 	mutex_lock(&dev_priv->dpll_lock);
147 	WARN_ON(!pll->state.crtc_mask);
148 	if (!pll->active_mask) {
149 		DRM_DEBUG_DRIVER("setting up %s\n", pll->info->name);
150 		WARN_ON(pll->on);
151 		assert_shared_dpll_disabled(dev_priv, pll);
152 
153 		pll->info->funcs->prepare(dev_priv, pll);
154 	}
155 	mutex_unlock(&dev_priv->dpll_lock);
156 }
157 
158 /**
159  * intel_enable_shared_dpll - enable a CRTC's shared DPLL
160  * @crtc_state: CRTC, and its state, which has a shared DPLL
161  *
162  * Enable the shared DPLL used by the CRTC of @crtc_state.
163  */
164 void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
165 {
166 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
167 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
168 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
169 	unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
170 	unsigned int old_mask;
171 
172 	if (WARN_ON(pll == NULL))
173 		return;
174 
175 	mutex_lock(&dev_priv->dpll_lock);
176 	old_mask = pll->active_mask;
177 
178 	if (WARN_ON(!(pll->state.crtc_mask & crtc_mask)) ||
179 	    WARN_ON(pll->active_mask & crtc_mask))
180 		goto out;
181 
182 	pll->active_mask |= crtc_mask;
183 
184 	DRM_DEBUG_KMS("enable %s (active %x, on? %d) for crtc %d\n",
185 		      pll->info->name, pll->active_mask, pll->on,
186 		      crtc->base.base.id);
187 
188 	if (old_mask) {
189 		WARN_ON(!pll->on);
190 		assert_shared_dpll_enabled(dev_priv, pll);
191 		goto out;
192 	}
193 	WARN_ON(pll->on);
194 
195 	DRM_DEBUG_KMS("enabling %s\n", pll->info->name);
196 	pll->info->funcs->enable(dev_priv, pll);
197 	pll->on = true;
198 
199 out:
200 	mutex_unlock(&dev_priv->dpll_lock);
201 }
202 
203 /**
204  * intel_disable_shared_dpll - disable a CRTC's shared DPLL
205  * @crtc_state: CRTC, and its state, which has a shared DPLL
206  *
207  * Disable the shared DPLL used by the CRTC of @crtc_state.
208  */
209 void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
210 {
211 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
212 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
213 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
214 	unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
215 
216 	/* PCH only available on ILK+ */
217 	if (INTEL_GEN(dev_priv) < 5)
218 		return;
219 
220 	if (pll == NULL)
221 		return;
222 
223 	mutex_lock(&dev_priv->dpll_lock);
224 	if (WARN_ON(!(pll->active_mask & crtc_mask)))
225 		goto out;
226 
227 	DRM_DEBUG_KMS("disable %s (active %x, on? %d) for crtc %d\n",
228 		      pll->info->name, pll->active_mask, pll->on,
229 		      crtc->base.base.id);
230 
231 	assert_shared_dpll_enabled(dev_priv, pll);
232 	WARN_ON(!pll->on);
233 
234 	pll->active_mask &= ~crtc_mask;
235 	if (pll->active_mask)
236 		goto out;
237 
238 	DRM_DEBUG_KMS("disabling %s\n", pll->info->name);
239 	pll->info->funcs->disable(dev_priv, pll);
240 	pll->on = false;
241 
242 out:
243 	mutex_unlock(&dev_priv->dpll_lock);
244 }
245 
246 static struct intel_shared_dpll *
247 intel_find_shared_dpll(struct intel_atomic_state *state,
248 		       const struct intel_crtc *crtc,
249 		       const struct intel_dpll_hw_state *pll_state,
250 		       enum intel_dpll_id range_min,
251 		       enum intel_dpll_id range_max)
252 {
253 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
254 	struct intel_shared_dpll *pll, *unused_pll = NULL;
255 	struct intel_shared_dpll_state *shared_dpll;
256 	enum intel_dpll_id i;
257 
258 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
259 
260 	for (i = range_min; i <= range_max; i++) {
261 		pll = &dev_priv->shared_dplls[i];
262 
263 		/* Check PLLs already in use first; remember the first unused one */
264 		if (shared_dpll[i].crtc_mask == 0) {
265 			if (!unused_pll)
266 				unused_pll = pll;
267 			continue;
268 		}
269 
270 		if (memcmp(pll_state,
271 			   &shared_dpll[i].hw_state,
272 			   sizeof(*pll_state)) == 0) {
273 			DRM_DEBUG_KMS("[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
274 				      crtc->base.base.id, crtc->base.name,
275 				      pll->info->name,
276 				      shared_dpll[i].crtc_mask,
277 				      pll->active_mask);
278 			return pll;
279 		}
280 	}
281 
282 	/* OK, no matching timings; maybe there's a free one? */
283 	if (unused_pll) {
284 		DRM_DEBUG_KMS("[CRTC:%d:%s] allocated %s\n",
285 			      crtc->base.base.id, crtc->base.name,
286 			      unused_pll->info->name);
287 		return unused_pll;
288 	}
289 
290 	return NULL;
291 }
292 
293 static void
294 intel_reference_shared_dpll(struct intel_atomic_state *state,
295 			    const struct intel_crtc *crtc,
296 			    const struct intel_shared_dpll *pll,
297 			    const struct intel_dpll_hw_state *pll_state)
298 {
299 	struct intel_shared_dpll_state *shared_dpll;
300 	const enum intel_dpll_id id = pll->info->id;
301 
302 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
303 
304 	if (shared_dpll[id].crtc_mask == 0)
305 		shared_dpll[id].hw_state = *pll_state;
306 
307 	DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->info->name,
308 			 pipe_name(crtc->pipe));
309 
310 	shared_dpll[id].crtc_mask |= 1 << crtc->pipe;
311 }
312 
313 static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
314 					  const struct intel_crtc *crtc,
315 					  const struct intel_shared_dpll *pll)
316 {
317 	struct intel_shared_dpll_state *shared_dpll;
318 
319 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
320 	shared_dpll[pll->info->id].crtc_mask &= ~(1 << crtc->pipe);
321 }
322 
323 static void intel_put_dpll(struct intel_atomic_state *state,
324 			   struct intel_crtc *crtc)
325 {
326 	const struct intel_crtc_state *old_crtc_state =
327 		intel_atomic_get_old_crtc_state(state, crtc);
328 	struct intel_crtc_state *new_crtc_state =
329 		intel_atomic_get_new_crtc_state(state, crtc);
330 
331 	new_crtc_state->shared_dpll = NULL;
332 
333 	if (!old_crtc_state->shared_dpll)
334 		return;
335 
336 	intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
337 }
338 
339 /**
340  * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
341  * @state: atomic state
342  *
343  * This is the dpll version of drm_atomic_helper_swap_state() since the
344  * helper does not handle driver-specific global state.
345  *
346  * For consistency with atomic helpers this function does a complete swap,
347  * i.e. it also puts the current state into @state, even though there is no
348  * need for that at this moment.
349  */
350 void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
351 {
352 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
353 	struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
354 	enum intel_dpll_id i;
355 
356 	if (!state->dpll_set)
357 		return;
358 
359 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
360 		struct intel_shared_dpll *pll =
361 			&dev_priv->shared_dplls[i];
362 
363 		swap(pll->state, shared_dpll[i]);
364 	}
365 }
366 
367 static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
368 				      struct intel_shared_dpll *pll,
369 				      struct intel_dpll_hw_state *hw_state)
370 {
371 	const enum intel_dpll_id id = pll->info->id;
372 	intel_wakeref_t wakeref;
373 	u32 val;
374 
375 	wakeref = intel_display_power_get_if_enabled(dev_priv,
376 						     POWER_DOMAIN_DISPLAY_CORE);
377 	if (!wakeref)
378 		return false;
379 
380 	val = I915_READ(PCH_DPLL(id));
381 	hw_state->dpll = val;
382 	hw_state->fp0 = I915_READ(PCH_FP0(id));
383 	hw_state->fp1 = I915_READ(PCH_FP1(id));
384 
385 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
386 
387 	return val & DPLL_VCO_ENABLE;
388 }
389 
390 static void ibx_pch_dpll_prepare(struct drm_i915_private *dev_priv,
391 				 struct intel_shared_dpll *pll)
392 {
393 	const enum intel_dpll_id id = pll->info->id;
394 
395 	I915_WRITE(PCH_FP0(id), pll->state.hw_state.fp0);
396 	I915_WRITE(PCH_FP1(id), pll->state.hw_state.fp1);
397 }
398 
399 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
400 {
401 	u32 val;
402 	bool enabled;
403 
404 	I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
405 
406 	val = I915_READ(PCH_DREF_CONTROL);
407 	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
408 			    DREF_SUPERSPREAD_SOURCE_MASK));
409 	I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
410 }
411 
412 static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
413 				struct intel_shared_dpll *pll)
414 {
415 	const enum intel_dpll_id id = pll->info->id;
416 
417 	/* PCH refclock must be enabled first */
418 	ibx_assert_pch_refclk_enabled(dev_priv);
419 
420 	I915_WRITE(PCH_DPLL(id), pll->state.hw_state.dpll);
421 
422 	/* Wait for the clocks to stabilize. */
423 	POSTING_READ(PCH_DPLL(id));
424 	udelay(150);
425 
426 	/* The pixel multiplier can only be updated once the
427 	 * DPLL is enabled and the clocks are stable.
428 	 *
429 	 * So write it again.
430 	 */
431 	I915_WRITE(PCH_DPLL(id), pll->state.hw_state.dpll);
432 	POSTING_READ(PCH_DPLL(id));
433 	udelay(200);
434 }
435 
436 static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
437 				 struct intel_shared_dpll *pll)
438 {
439 	const enum intel_dpll_id id = pll->info->id;
440 
441 	I915_WRITE(PCH_DPLL(id), 0);
442 	POSTING_READ(PCH_DPLL(id));
443 	udelay(200);
444 }
445 
446 static bool ibx_get_dpll(struct intel_atomic_state *state,
447 			 struct intel_crtc *crtc,
448 			 struct intel_encoder *encoder)
449 {
450 	struct intel_crtc_state *crtc_state =
451 		intel_atomic_get_new_crtc_state(state, crtc);
452 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
453 	struct intel_shared_dpll *pll;
454 	enum intel_dpll_id i;
455 
456 	if (HAS_PCH_IBX(dev_priv)) {
457 		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
458 		i = (enum intel_dpll_id) crtc->pipe;
459 		pll = &dev_priv->shared_dplls[i];
460 
461 		DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
462 			      crtc->base.base.id, crtc->base.name,
463 			      pll->info->name);
464 	} else {
465 		pll = intel_find_shared_dpll(state, crtc,
466 					     &crtc_state->dpll_hw_state,
467 					     DPLL_ID_PCH_PLL_A,
468 					     DPLL_ID_PCH_PLL_B);
469 	}
470 
471 	if (!pll)
472 		return false;
473 
474 	/* reference the pll */
475 	intel_reference_shared_dpll(state, crtc,
476 				    pll, &crtc_state->dpll_hw_state);
477 
478 	crtc_state->shared_dpll = pll;
479 
480 	return true;
481 }
482 
483 static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
484 			      const struct intel_dpll_hw_state *hw_state)
485 {
486 	DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
487 		      "fp0: 0x%x, fp1: 0x%x\n",
488 		      hw_state->dpll,
489 		      hw_state->dpll_md,
490 		      hw_state->fp0,
491 		      hw_state->fp1);
492 }
493 
494 static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
495 	.prepare = ibx_pch_dpll_prepare,
496 	.enable = ibx_pch_dpll_enable,
497 	.disable = ibx_pch_dpll_disable,
498 	.get_hw_state = ibx_pch_dpll_get_hw_state,
499 };
500 
501 static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
502 			       struct intel_shared_dpll *pll)
503 {
504 	const enum intel_dpll_id id = pll->info->id;
505 
506 	I915_WRITE(WRPLL_CTL(id), pll->state.hw_state.wrpll);
507 	POSTING_READ(WRPLL_CTL(id));
508 	udelay(20);
509 }
510 
511 static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
512 				struct intel_shared_dpll *pll)
513 {
514 	I915_WRITE(SPLL_CTL, pll->state.hw_state.spll);
515 	POSTING_READ(SPLL_CTL);
516 	udelay(20);
517 }
518 
519 static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
520 				  struct intel_shared_dpll *pll)
521 {
522 	const enum intel_dpll_id id = pll->info->id;
523 	u32 val;
524 
525 	val = I915_READ(WRPLL_CTL(id));
526 	I915_WRITE(WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
527 	POSTING_READ(WRPLL_CTL(id));
528 
529 	/*
530 	 * Try to set up the PCH reference clock once all DPLLs
531 	 * that depend on it have been shut down.
532 	 */
533 	if (dev_priv->pch_ssc_use & BIT(id))
534 		intel_init_pch_refclk(dev_priv);
535 }
536 
537 static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
538 				 struct intel_shared_dpll *pll)
539 {
540 	enum intel_dpll_id id = pll->info->id;
541 	u32 val;
542 
543 	val = I915_READ(SPLL_CTL);
544 	I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
545 	POSTING_READ(SPLL_CTL);
546 
547 	/*
548 	 * Try to set up the PCH reference clock once all DPLLs
549 	 * that depend on it have been shut down.
550 	 */
551 	if (dev_priv->pch_ssc_use & BIT(id))
552 		intel_init_pch_refclk(dev_priv);
553 }
554 
555 static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
556 				       struct intel_shared_dpll *pll,
557 				       struct intel_dpll_hw_state *hw_state)
558 {
559 	const enum intel_dpll_id id = pll->info->id;
560 	intel_wakeref_t wakeref;
561 	u32 val;
562 
563 	wakeref = intel_display_power_get_if_enabled(dev_priv,
564 						     POWER_DOMAIN_DISPLAY_CORE);
565 	if (!wakeref)
566 		return false;
567 
568 	val = I915_READ(WRPLL_CTL(id));
569 	hw_state->wrpll = val;
570 
571 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
572 
573 	return val & WRPLL_PLL_ENABLE;
574 }
575 
576 static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
577 				      struct intel_shared_dpll *pll,
578 				      struct intel_dpll_hw_state *hw_state)
579 {
580 	intel_wakeref_t wakeref;
581 	u32 val;
582 
583 	wakeref = intel_display_power_get_if_enabled(dev_priv,
584 						     POWER_DOMAIN_DISPLAY_CORE);
585 	if (!wakeref)
586 		return false;
587 
588 	val = I915_READ(SPLL_CTL);
589 	hw_state->spll = val;
590 
591 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
592 
593 	return val & SPLL_PLL_ENABLE;
594 }
595 
596 #define LC_FREQ 2700
597 #define LC_FREQ_2K U64_C(LC_FREQ * 2000)
598 
599 #define P_MIN 2
600 #define P_MAX 64
601 #define P_INC 2
602 
603 /* Constraints for good PLL behavior */
604 #define REF_MIN 48
605 #define REF_MAX 400
606 #define VCO_MIN 2400
607 #define VCO_MAX 4800
608 
609 struct hsw_wrpll_rnp {
610 	unsigned p, n2, r2;
611 };
612 
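/*
 * Returns the frequency budget, in ppm of the target clock, used by
 * hsw_wrpll_update_rnp() below; well-known TMDS clocks get a budget of 0,
 * i.e. only an exact divider match counts as "within budget" for them.
 */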
613 static unsigned hsw_wrpll_get_budget_for_freq(int clock)
614 {
615 	unsigned budget;
616 
617 	switch (clock) {
618 	case 25175000:
619 	case 25200000:
620 	case 27000000:
621 	case 27027000:
622 	case 37762500:
623 	case 37800000:
624 	case 40500000:
625 	case 40541000:
626 	case 54000000:
627 	case 54054000:
628 	case 59341000:
629 	case 59400000:
630 	case 72000000:
631 	case 74176000:
632 	case 74250000:
633 	case 81000000:
634 	case 81081000:
635 	case 89012000:
636 	case 89100000:
637 	case 108000000:
638 	case 108108000:
639 	case 111264000:
640 	case 111375000:
641 	case 148352000:
642 	case 148500000:
643 	case 162000000:
644 	case 162162000:
645 	case 222525000:
646 	case 222750000:
647 	case 296703000:
648 	case 297000000:
649 		budget = 0;
650 		break;
651 	case 233500000:
652 	case 245250000:
653 	case 247750000:
654 	case 253250000:
655 	case 298000000:
656 		budget = 1500;
657 		break;
658 	case 169128000:
659 	case 169500000:
660 	case 179500000:
661 	case 202000000:
662 		budget = 2000;
663 		break;
664 	case 256250000:
665 	case 262500000:
666 	case 270000000:
667 	case 272500000:
668 	case 273750000:
669 	case 280750000:
670 	case 281250000:
671 	case 286000000:
672 	case 291750000:
673 		budget = 4000;
674 		break;
675 	case 267250000:
676 	case 268500000:
677 		budget = 5000;
678 		break;
679 	default:
680 		budget = 1000;
681 		break;
682 	}
683 
684 	return budget;
685 }
686 
687 static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
688 				 unsigned int r2, unsigned int n2,
689 				 unsigned int p,
690 				 struct hsw_wrpll_rnp *best)
691 {
692 	u64 a, b, c, d, diff, diff_best;
693 
694 	/* No best (r,n,p) yet */
695 	if (best->p == 0) {
696 		best->p = p;
697 		best->n2 = n2;
698 		best->r2 = r2;
699 		return;
700 	}
701 
702 	/*
703 	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
704 	 * freq2k.
705 	 *
706 	 * delta = 1e6 *
707 	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
708 	 *	   freq2k;
709 	 *
710 	 * and we would like delta <= budget.
711 	 *
712 	 * If the discrepancy is above the PPM-based budget, always prefer to
713 	 * improve upon the previous solution.  However, if you're within the
714 	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
715 	 */
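	/*
	 * In integer form: "within budget" means
	 * 1e6 * diff <= budget * freq2k * p * r2, i.e. c <= a below
	 * (and d <= b for the currently best candidate).
	 */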
716 	a = freq2k * budget * p * r2;
717 	b = freq2k * budget * best->p * best->r2;
718 	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
719 	diff_best = abs_diff(freq2k * best->p * best->r2,
720 			     LC_FREQ_2K * best->n2);
721 	c = 1000000 * diff;
722 	d = 1000000 * diff_best;
723 
724 	if (a < c && b < d) {
725 		/* If both are above the budget, pick the closer */
726 		if (best->p * best->r2 * diff < p * r2 * diff_best) {
727 			best->p = p;
728 			best->n2 = n2;
729 			best->r2 = r2;
730 		}
731 	} else if (a >= c && b < d) {
732 		/* The candidate is within budget but the current best is not: update */
733 		best->p = p;
734 		best->n2 = n2;
735 		best->r2 = r2;
736 	} else if (a >= c && b >= d) {
737 		/* Both are below the limit, so pick the higher n2/(r2*r2) */
738 		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
739 			best->p = p;
740 			best->n2 = n2;
741 			best->r2 = r2;
742 		}
743 	}
744 	/* Otherwise a < c && b >= d, do nothing */
745 }
746 
747 static void
748 hsw_ddi_calculate_wrpll(int clock /* in Hz */,
749 			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
750 {
751 	u64 freq2k;
752 	unsigned p, n2, r2;
753 	struct hsw_wrpll_rnp best = { 0, 0, 0 };
754 	unsigned budget;
755 
756 	freq2k = clock / 100;
757 
758 	budget = hsw_wrpll_get_budget_for_freq(clock);
759 
760 	/* Special case handling for 540 MHz pixel clock: bypass WR PLL
761 	 * entirely and directly pass the LC PLL to it. */
762 	if (freq2k == 5400000) {
763 		*n2_out = 2;
764 		*p_out = 1;
765 		*r2_out = 2;
766 		return;
767 	}
768 
769 	/*
770 	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
771 	 * the WR PLL.
772 	 *
773 	 * We want R so that REF_MIN <= Ref <= REF_MAX.
774 	 * Injecting R2 = 2 * R gives:
775 	 *   REF_MAX * r2 > LC_FREQ * 2 and
776 	 *   REF_MIN * r2 < LC_FREQ * 2
777 	 *
778 	 * Which means the desired boundaries for r2 are:
779 	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
780 	 *
781 	 */
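	/*
	 * With LC_FREQ = 2700 and the limits above, r2 runs from
	 * 5400 / 400 + 1 = 14 to 5400 / 48 = 112.
	 */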
782 	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
783 	     r2 <= LC_FREQ * 2 / REF_MIN;
784 	     r2++) {
785 
786 		/*
787 		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
788 		 *
789 		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
790 		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
791 		 *   VCO_MAX * r2 > n2 * LC_FREQ and
792 		 *   VCO_MIN * r2 < n2 * LC_FREQ
793 		 *
794 		 * Which means the desired boundaries for n2 are:
795 		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
796 		 */
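		/*
		 * E.g. for r2 = 14 this yields n2 from
		 * 2400 * 14 / 2700 + 1 = 13 to 4800 * 14 / 2700 = 24.
		 */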
797 		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
798 		     n2 <= VCO_MAX * r2 / LC_FREQ;
799 		     n2++) {
800 
801 			for (p = P_MIN; p <= P_MAX; p += P_INC)
802 				hsw_wrpll_update_rnp(freq2k, budget,
803 						     r2, n2, p, &best);
804 		}
805 	}
806 
807 	*n2_out = best.n2;
808 	*p_out = best.p;
809 	*r2_out = best.r2;
810 }
811 
812 static struct intel_shared_dpll *
813 hsw_ddi_hdmi_get_dpll(struct intel_atomic_state *state,
814 		      struct intel_crtc *crtc)
815 {
816 	struct intel_crtc_state *crtc_state =
817 		intel_atomic_get_new_crtc_state(state, crtc);
818 	struct intel_shared_dpll *pll;
819 	u32 val;
820 	unsigned int p, n2, r2;
821 
822 	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
823 
824 	val = WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
825 	      WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
826 	      WRPLL_DIVIDER_POST(p);
827 
828 	crtc_state->dpll_hw_state.wrpll = val;
829 
830 	pll = intel_find_shared_dpll(state, crtc,
831 				     &crtc_state->dpll_hw_state,
832 				     DPLL_ID_WRPLL1, DPLL_ID_WRPLL2);
833 
834 	if (!pll)
835 		return NULL;
836 
837 	return pll;
838 }
839 
840 static struct intel_shared_dpll *
841 hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state)
842 {
843 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
844 	struct intel_shared_dpll *pll;
845 	enum intel_dpll_id pll_id;
846 	int clock = crtc_state->port_clock;
847 
848 	switch (clock / 2) {
849 	case 81000:
850 		pll_id = DPLL_ID_LCPLL_810;
851 		break;
852 	case 135000:
853 		pll_id = DPLL_ID_LCPLL_1350;
854 		break;
855 	case 270000:
856 		pll_id = DPLL_ID_LCPLL_2700;
857 		break;
858 	default:
859 		DRM_DEBUG_KMS("Invalid clock for DP: %d\n", clock);
860 		return NULL;
861 	}
862 
863 	pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
864 
865 	if (!pll)
866 		return NULL;
867 
868 	return pll;
869 }
870 
871 static bool hsw_get_dpll(struct intel_atomic_state *state,
872 			 struct intel_crtc *crtc,
873 			 struct intel_encoder *encoder)
874 {
875 	struct intel_crtc_state *crtc_state =
876 		intel_atomic_get_new_crtc_state(state, crtc);
877 	struct intel_shared_dpll *pll;
878 
879 	memset(&crtc_state->dpll_hw_state, 0,
880 	       sizeof(crtc_state->dpll_hw_state));
881 
882 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
883 		pll = hsw_ddi_hdmi_get_dpll(state, crtc);
884 	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
885 		pll = hsw_ddi_dp_get_dpll(crtc_state);
886 	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
887 		if (WARN_ON(crtc_state->port_clock / 2 != 135000))
888 			return false;
889 
890 		crtc_state->dpll_hw_state.spll =
891 			SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
892 
893 		pll = intel_find_shared_dpll(state, crtc,
894 					     &crtc_state->dpll_hw_state,
895 					     DPLL_ID_SPLL, DPLL_ID_SPLL);
896 	} else {
897 		return false;
898 	}
899 
900 	if (!pll)
901 		return false;
902 
903 	intel_reference_shared_dpll(state, crtc,
904 				    pll, &crtc_state->dpll_hw_state);
905 
906 	crtc_state->shared_dpll = pll;
907 
908 	return true;
909 }
910 
911 static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
912 			      const struct intel_dpll_hw_state *hw_state)
913 {
914 	DRM_DEBUG_KMS("dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
915 		      hw_state->wrpll, hw_state->spll);
916 }
917 
918 static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
919 	.enable = hsw_ddi_wrpll_enable,
920 	.disable = hsw_ddi_wrpll_disable,
921 	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
922 };
923 
924 static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
925 	.enable = hsw_ddi_spll_enable,
926 	.disable = hsw_ddi_spll_disable,
927 	.get_hw_state = hsw_ddi_spll_get_hw_state,
928 };
929 
930 static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
931 				 struct intel_shared_dpll *pll)
932 {
933 }
934 
935 static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
936 				  struct intel_shared_dpll *pll)
937 {
938 }
939 
940 static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
941 				       struct intel_shared_dpll *pll,
942 				       struct intel_dpll_hw_state *hw_state)
943 {
944 	return true;
945 }
946 
947 static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
948 	.enable = hsw_ddi_lcpll_enable,
949 	.disable = hsw_ddi_lcpll_disable,
950 	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
951 };
952 
953 struct skl_dpll_regs {
954 	i915_reg_t ctl, cfgcr1, cfgcr2;
955 };
956 
957 /* this array is indexed by the *shared* pll id */
958 static const struct skl_dpll_regs skl_dpll_regs[4] = {
959 	{
960 		/* DPLL 0 */
961 		.ctl = LCPLL1_CTL,
962 		/* DPLL 0 doesn't support HDMI mode */
963 	},
964 	{
965 		/* DPLL 1 */
966 		.ctl = LCPLL2_CTL,
967 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
968 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
969 	},
970 	{
971 		/* DPLL 2 */
972 		.ctl = WRPLL_CTL(0),
973 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
974 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
975 	},
976 	{
977 		/* DPLL 3 */
978 		.ctl = WRPLL_CTL(1),
979 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
980 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
981 	},
982 };
983 
984 static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
985 				    struct intel_shared_dpll *pll)
986 {
987 	const enum intel_dpll_id id = pll->info->id;
988 	u32 val;
989 
990 	val = I915_READ(DPLL_CTRL1);
991 
992 	val &= ~(DPLL_CTRL1_HDMI_MODE(id) |
993 		 DPLL_CTRL1_SSC(id) |
994 		 DPLL_CTRL1_LINK_RATE_MASK(id));
995 	val |= pll->state.hw_state.ctrl1 << (id * 6);
996 
997 	I915_WRITE(DPLL_CTRL1, val);
998 	POSTING_READ(DPLL_CTRL1);
999 }
1000 
1001 static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
1002 			       struct intel_shared_dpll *pll)
1003 {
1004 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1005 	const enum intel_dpll_id id = pll->info->id;
1006 
1007 	skl_ddi_pll_write_ctrl1(dev_priv, pll);
1008 
1009 	I915_WRITE(regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
1010 	I915_WRITE(regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
1011 	POSTING_READ(regs[id].cfgcr1);
1012 	POSTING_READ(regs[id].cfgcr2);
1013 
1014 	/* the enable bit is always bit 31 */
1015 	I915_WRITE(regs[id].ctl,
1016 		   I915_READ(regs[id].ctl) | LCPLL_PLL_ENABLE);
1017 
1018 	if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
1019 		DRM_ERROR("DPLL %d not locked\n", id);
1020 }
1021 
1022 static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
1023 				 struct intel_shared_dpll *pll)
1024 {
1025 	skl_ddi_pll_write_ctrl1(dev_priv, pll);
1026 }
1027 
1028 static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
1029 				struct intel_shared_dpll *pll)
1030 {
1031 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1032 	const enum intel_dpll_id id = pll->info->id;
1033 
1034 	/* the enable bit is always bit 31 */
1035 	I915_WRITE(regs[id].ctl,
1036 		   I915_READ(regs[id].ctl) & ~LCPLL_PLL_ENABLE);
1037 	POSTING_READ(regs[id].ctl);
1038 }
1039 
1040 static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
1041 				  struct intel_shared_dpll *pll)
1042 {
1043 }
1044 
1045 static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
1046 				     struct intel_shared_dpll *pll,
1047 				     struct intel_dpll_hw_state *hw_state)
1048 {
1049 	u32 val;
1050 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1051 	const enum intel_dpll_id id = pll->info->id;
1052 	intel_wakeref_t wakeref;
1053 	bool ret;
1054 
1055 	wakeref = intel_display_power_get_if_enabled(dev_priv,
1056 						     POWER_DOMAIN_DISPLAY_CORE);
1057 	if (!wakeref)
1058 		return false;
1059 
1060 	ret = false;
1061 
1062 	val = I915_READ(regs[id].ctl);
1063 	if (!(val & LCPLL_PLL_ENABLE))
1064 		goto out;
1065 
1066 	val = I915_READ(DPLL_CTRL1);
1067 	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1068 
1069 	/* avoid reading back stale values if HDMI mode is not enabled */
1070 	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
1071 		hw_state->cfgcr1 = I915_READ(regs[id].cfgcr1);
1072 		hw_state->cfgcr2 = I915_READ(regs[id].cfgcr2);
1073 	}
1074 	ret = true;
1075 
1076 out:
1077 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1078 
1079 	return ret;
1080 }
1081 
1082 static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
1083 				       struct intel_shared_dpll *pll,
1084 				       struct intel_dpll_hw_state *hw_state)
1085 {
1086 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1087 	const enum intel_dpll_id id = pll->info->id;
1088 	intel_wakeref_t wakeref;
1089 	u32 val;
1090 	bool ret;
1091 
1092 	wakeref = intel_display_power_get_if_enabled(dev_priv,
1093 						     POWER_DOMAIN_DISPLAY_CORE);
1094 	if (!wakeref)
1095 		return false;
1096 
1097 	ret = false;
1098 
1099 	/* DPLL0 is always enabled since it drives CDCLK */
1100 	val = I915_READ(regs[id].ctl);
1101 	if (WARN_ON(!(val & LCPLL_PLL_ENABLE)))
1102 		goto out;
1103 
1104 	val = I915_READ(DPLL_CTRL1);
1105 	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1106 
1107 	ret = true;
1108 
1109 out:
1110 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1111 
1112 	return ret;
1113 }
1114 
1115 struct skl_wrpll_context {
1116 	u64 min_deviation;		/* current minimal deviation */
1117 	u64 central_freq;		/* chosen central freq */
1118 	u64 dco_freq;			/* chosen dco freq */
1119 	unsigned int p;			/* chosen divider */
1120 };
1121 
1122 static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
1123 {
1124 	memset(ctx, 0, sizeof(*ctx));
1125 
1126 	ctx->min_deviation = U64_MAX;
1127 }
1128 
1129 /* DCO freq must be within +1%/-6% of the DCO central freq */
1130 #define SKL_DCO_MAX_PDEVIATION	100
1131 #define SKL_DCO_MAX_NDEVIATION	600
1132 
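/*
 * skl_wrpll_try_divider() computes the deviation in units of 0.01%, so the
 * limits above mean +1%/-6%.  E.g. a 9.045 GHz DCO against a 9.0 GHz central
 * frequency gives 10000 * 45000000 / 9000000000 = 50, i.e. 0.5%.
 */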
1133 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1134 				  u64 central_freq,
1135 				  u64 dco_freq,
1136 				  unsigned int divider)
1137 {
1138 	u64 deviation;
1139 
1140 	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1141 			      central_freq);
1142 
1143 	/* positive deviation */
1144 	if (dco_freq >= central_freq) {
1145 		if (deviation < SKL_DCO_MAX_PDEVIATION &&
1146 		    deviation < ctx->min_deviation) {
1147 			ctx->min_deviation = deviation;
1148 			ctx->central_freq = central_freq;
1149 			ctx->dco_freq = dco_freq;
1150 			ctx->p = divider;
1151 		}
1152 	/* negative deviation */
1153 	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1154 		   deviation < ctx->min_deviation) {
1155 		ctx->min_deviation = deviation;
1156 		ctx->central_freq = central_freq;
1157 		ctx->dco_freq = dco_freq;
1158 		ctx->p = divider;
1159 	}
1160 }
1161 
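/*
 * Split the overall divider p into the P0/P1/P2 stages that
 * skl_wrpll_params_populate() encodes as pdiv/qdiv/kdiv.  For example,
 * p = 20 is even, half = 10, 10 % 2 == 0, so p0 = 2, p1 = 5, p2 = 2
 * (2 * 5 * 2 = 20); the odd divider p = 15 maps to p0 = 3, p1 = 1, p2 = 5.
 */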
1162 static void skl_wrpll_get_multipliers(unsigned int p,
1163 				      unsigned int *p0 /* out */,
1164 				      unsigned int *p1 /* out */,
1165 				      unsigned int *p2 /* out */)
1166 {
1167 	/* even dividers */
1168 	if (p % 2 == 0) {
1169 		unsigned int half = p / 2;
1170 
1171 		if (half == 1 || half == 2 || half == 3 || half == 5) {
1172 			*p0 = 2;
1173 			*p1 = 1;
1174 			*p2 = half;
1175 		} else if (half % 2 == 0) {
1176 			*p0 = 2;
1177 			*p1 = half / 2;
1178 			*p2 = 2;
1179 		} else if (half % 3 == 0) {
1180 			*p0 = 3;
1181 			*p1 = half / 3;
1182 			*p2 = 2;
1183 		} else if (half % 7 == 0) {
1184 			*p0 = 7;
1185 			*p1 = half / 7;
1186 			*p2 = 2;
1187 		}
1188 	} else if (p == 3 || p == 9) {  /* 3, 5, 7, 9, 15, 21, 35 */
1189 		*p0 = 3;
1190 		*p1 = 1;
1191 		*p2 = p / 3;
1192 	} else if (p == 5 || p == 7) {
1193 		*p0 = p;
1194 		*p1 = 1;
1195 		*p2 = 1;
1196 	} else if (p == 15) {
1197 		*p0 = 3;
1198 		*p1 = 1;
1199 		*p2 = 5;
1200 	} else if (p == 21) {
1201 		*p0 = 7;
1202 		*p1 = 1;
1203 		*p2 = 3;
1204 	} else if (p == 35) {
1205 		*p0 = 7;
1206 		*p1 = 1;
1207 		*p2 = 5;
1208 	}
1209 }
1210 
1211 struct skl_wrpll_params {
1212 	u32 dco_fraction;
1213 	u32 dco_integer;
1214 	u32 qdiv_ratio;
1215 	u32 qdiv_mode;
1216 	u32 kdiv;
1217 	u32 pdiv;
1218 	u32 central_freq;
1219 };
1220 
1221 static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
1222 				      u64 afe_clock,
1223 				      u64 central_freq,
1224 				      u32 p0, u32 p1, u32 p2)
1225 {
1226 	u64 dco_freq;
1227 
1228 	switch (central_freq) {
1229 	case 9600000000ULL:
1230 		params->central_freq = 0;
1231 		break;
1232 	case 9000000000ULL:
1233 		params->central_freq = 1;
1234 		break;
1235 	case 8400000000ULL:
1236 		params->central_freq = 3;
1237 	}
1238 
1239 	switch (p0) {
1240 	case 1:
1241 		params->pdiv = 0;
1242 		break;
1243 	case 2:
1244 		params->pdiv = 1;
1245 		break;
1246 	case 3:
1247 		params->pdiv = 2;
1248 		break;
1249 	case 7:
1250 		params->pdiv = 4;
1251 		break;
1252 	default:
1253 		WARN(1, "Incorrect PDiv\n");
1254 	}
1255 
1256 	switch (p2) {
1257 	case 5:
1258 		params->kdiv = 0;
1259 		break;
1260 	case 2:
1261 		params->kdiv = 1;
1262 		break;
1263 	case 3:
1264 		params->kdiv = 2;
1265 		break;
1266 	case 1:
1267 		params->kdiv = 3;
1268 		break;
1269 	default:
1270 		WARN(1, "Incorrect KDiv\n");
1271 	}
1272 
1273 	params->qdiv_ratio = p1;
1274 	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
1275 
1276 	dco_freq = p0 * p1 * p2 * afe_clock;
1277 
1278 	/*
1279 	 * Intermediate values are in Hz.
1280 	 * Divide by MHz to match bspec
1281 	 */
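	/*
	 * Worked example (assuming a 594 MHz pixel clock, i.e.
	 * afe_clock = 2.97 GHz, and p0 * p1 * p2 = 3, so dco_freq = 8.91 GHz):
	 * dco_integer = 8910000000 / 24000000 = 371 and
	 * dco_fraction = (371250000 - 371000000) * 0x8000 / 1000000 = 8192.
	 */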
1282 	params->dco_integer = div_u64(dco_freq, 24 * MHz(1));
1283 	params->dco_fraction =
1284 		div_u64((div_u64(dco_freq, 24) -
1285 			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
1286 }
1287 
1288 static bool
1289 skl_ddi_calculate_wrpll(int clock /* in Hz */,
1290 			struct skl_wrpll_params *wrpll_params)
1291 {
1292 	u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
1293 	u64 dco_central_freq[3] = { 8400000000ULL,
1294 				    9000000000ULL,
1295 				    9600000000ULL };
1296 	static const int even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
1297 					     24, 28, 30, 32, 36, 40, 42, 44,
1298 					     48, 52, 54, 56, 60, 64, 66, 68,
1299 					     70, 72, 76, 78, 80, 84, 88, 90,
1300 					     92, 96, 98 };
1301 	static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
1302 	static const struct {
1303 		const int *list;
1304 		int n_dividers;
1305 	} dividers[] = {
1306 		{ even_dividers, ARRAY_SIZE(even_dividers) },
1307 		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
1308 	};
1309 	struct skl_wrpll_context ctx;
1310 	unsigned int dco, d, i;
1311 	unsigned int p0, p1, p2;
1312 
1313 	skl_wrpll_context_init(&ctx);
1314 
1315 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
1316 		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
1317 			for (i = 0; i < dividers[d].n_dividers; i++) {
1318 				unsigned int p = dividers[d].list[i];
1319 				u64 dco_freq = p * afe_clock;
1320 
1321 				skl_wrpll_try_divider(&ctx,
1322 						      dco_central_freq[dco],
1323 						      dco_freq,
1324 						      p);
1325 				/*
1326 				 * Skip the remaining dividers if we're sure to
1327 				 * have found the definitive divider; a deviation
1328 				 * of 0 can't be improved.
1329 				 */
1330 				if (ctx.min_deviation == 0)
1331 					goto skip_remaining_dividers;
1332 			}
1333 		}
1334 
1335 skip_remaining_dividers:
1336 		/*
1337 		 * If a solution is found with an even divider, prefer
1338 		 * this one.
1339 		 */
1340 		if (d == 0 && ctx.p)
1341 			break;
1342 	}
1343 
1344 	if (!ctx.p) {
1345 		DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
1346 		return false;
1347 	}
1348 
1349 	/*
1350 	 * gcc's analysis incorrectly concludes that these can be used without
1351 	 * being initialized. To be fair, it's hard to guess.
1352 	 */
1353 	p0 = p1 = p2 = 0;
1354 	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
1355 	skl_wrpll_params_populate(wrpll_params, afe_clock, ctx.central_freq,
1356 				  p0, p1, p2);
1357 
1358 	return true;
1359 }
1360 
1361 static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
1362 {
1363 	u32 ctrl1, cfgcr1, cfgcr2;
1364 	struct skl_wrpll_params wrpll_params = { 0, };
1365 
1366 	/*
1367 	 * See comment in intel_dpll_hw_state to understand why we always use 0
1368 	 * as the DPLL id in this function.
1369 	 */
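	/*
	 * The per-PLL ctrl1 fields are 6 bits wide; skl_ddi_pll_write_ctrl1()
	 * shifts this value into place with "id * 6", and the get_hw_state
	 * hooks mask it back out with 0x3f.
	 */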
1370 	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1371 
1372 	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
1373 
1374 	if (!skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
1375 				     &wrpll_params))
1376 		return false;
1377 
1378 	cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
1379 		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
1380 		wrpll_params.dco_integer;
1381 
1382 	cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
1383 		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
1384 		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
1385 		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
1386 		wrpll_params.central_freq;
1387 
1388 	memset(&crtc_state->dpll_hw_state, 0,
1389 	       sizeof(crtc_state->dpll_hw_state));
1390 
1391 	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1392 	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
1393 	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
1394 	return true;
1395 }
1396 
1397 static bool
1398 skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1399 {
1400 	u32 ctrl1;
1401 
1402 	/*
1403 	 * See comment in intel_dpll_hw_state to understand why we always use 0
1404 	 * as the DPLL id in this function.
1405 	 */
1406 	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1407 	switch (crtc_state->port_clock / 2) {
1408 	case 81000:
1409 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
1410 		break;
1411 	case 135000:
1412 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
1413 		break;
1414 	case 270000:
1415 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
1416 		break;
1417 	/* eDP 1.4 rates */
1418 	case 162000:
1419 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
1420 		break;
1421 	case 108000:
1422 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
1423 		break;
1424 	case 216000:
1425 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
1426 		break;
1427 	}
1428 
1429 	memset(&crtc_state->dpll_hw_state, 0,
1430 	       sizeof(crtc_state->dpll_hw_state));
1431 
1432 	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1433 
1434 	return true;
1435 }
1436 
1437 static bool skl_get_dpll(struct intel_atomic_state *state,
1438 			 struct intel_crtc *crtc,
1439 			 struct intel_encoder *encoder)
1440 {
1441 	struct intel_crtc_state *crtc_state =
1442 		intel_atomic_get_new_crtc_state(state, crtc);
1443 	struct intel_shared_dpll *pll;
1444 	bool bret;
1445 
1446 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
1447 		bret = skl_ddi_hdmi_pll_dividers(crtc_state);
1448 		if (!bret) {
1449 			DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
1450 			return false;
1451 		}
1452 	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
1453 		bret = skl_ddi_dp_set_dpll_hw_state(crtc_state);
1454 		if (!bret) {
1455 			DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
1456 			return false;
1457 		}
1458 	} else {
1459 		return false;
1460 	}
1461 
1462 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1463 		pll = intel_find_shared_dpll(state, crtc,
1464 					     &crtc_state->dpll_hw_state,
1465 					     DPLL_ID_SKL_DPLL0,
1466 					     DPLL_ID_SKL_DPLL0);
1467 	else
1468 		pll = intel_find_shared_dpll(state, crtc,
1469 					     &crtc_state->dpll_hw_state,
1470 					     DPLL_ID_SKL_DPLL1,
1471 					     DPLL_ID_SKL_DPLL3);
1472 	if (!pll)
1473 		return false;
1474 
1475 	intel_reference_shared_dpll(state, crtc,
1476 				    pll, &crtc_state->dpll_hw_state);
1477 
1478 	crtc_state->shared_dpll = pll;
1479 
1480 	return true;
1481 }
1482 
1483 static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
1484 			      const struct intel_dpll_hw_state *hw_state)
1485 {
1486 	DRM_DEBUG_KMS("dpll_hw_state: "
1487 		      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
1488 		      hw_state->ctrl1,
1489 		      hw_state->cfgcr1,
1490 		      hw_state->cfgcr2);
1491 }
1492 
1493 static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
1494 	.enable = skl_ddi_pll_enable,
1495 	.disable = skl_ddi_pll_disable,
1496 	.get_hw_state = skl_ddi_pll_get_hw_state,
1497 };
1498 
1499 static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
1500 	.enable = skl_ddi_dpll0_enable,
1501 	.disable = skl_ddi_dpll0_disable,
1502 	.get_hw_state = skl_ddi_dpll0_get_hw_state,
1503 };
1504 
1505 static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
1506 				struct intel_shared_dpll *pll)
1507 {
1508 	u32 temp;
1509 	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
1510 	enum dpio_phy phy;
1511 	enum dpio_channel ch;
1512 
1513 	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
1514 
1515 	/* Non-SSC reference */
1516 	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1517 	temp |= PORT_PLL_REF_SEL;
1518 	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1519 
1520 	if (IS_GEMINILAKE(dev_priv)) {
1521 		temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1522 		temp |= PORT_PLL_POWER_ENABLE;
1523 		I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1524 
1525 		if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) &
1526 				 PORT_PLL_POWER_STATE), 200))
1527 			DRM_ERROR("Power state not set for PLL:%d\n", port);
1528 	}
1529 
1530 	/* Disable 10 bit clock */
1531 	temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
1532 	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
1533 	I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
1534 
1535 	/* Write P1 & P2 */
1536 	temp = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
1537 	temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
1538 	temp |= pll->state.hw_state.ebb0;
1539 	I915_WRITE(BXT_PORT_PLL_EBB_0(phy, ch), temp);
1540 
1541 	/* Write M2 integer */
1542 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 0));
1543 	temp &= ~PORT_PLL_M2_MASK;
1544 	temp |= pll->state.hw_state.pll0;
1545 	I915_WRITE(BXT_PORT_PLL(phy, ch, 0), temp);
1546 
1547 	/* Write N */
1548 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 1));
1549 	temp &= ~PORT_PLL_N_MASK;
1550 	temp |= pll->state.hw_state.pll1;
1551 	I915_WRITE(BXT_PORT_PLL(phy, ch, 1), temp);
1552 
1553 	/* Write M2 fraction */
1554 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 2));
1555 	temp &= ~PORT_PLL_M2_FRAC_MASK;
1556 	temp |= pll->state.hw_state.pll2;
1557 	I915_WRITE(BXT_PORT_PLL(phy, ch, 2), temp);
1558 
1559 	/* Write M2 fraction enable */
1560 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 3));
1561 	temp &= ~PORT_PLL_M2_FRAC_ENABLE;
1562 	temp |= pll->state.hw_state.pll3;
1563 	I915_WRITE(BXT_PORT_PLL(phy, ch, 3), temp);
1564 
1565 	/* Write coeff */
1566 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 6));
1567 	temp &= ~PORT_PLL_PROP_COEFF_MASK;
1568 	temp &= ~PORT_PLL_INT_COEFF_MASK;
1569 	temp &= ~PORT_PLL_GAIN_CTL_MASK;
1570 	temp |= pll->state.hw_state.pll6;
1571 	I915_WRITE(BXT_PORT_PLL(phy, ch, 6), temp);
1572 
1573 	/* Write calibration val */
1574 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 8));
1575 	temp &= ~PORT_PLL_TARGET_CNT_MASK;
1576 	temp |= pll->state.hw_state.pll8;
1577 	I915_WRITE(BXT_PORT_PLL(phy, ch, 8), temp);
1578 
1579 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 9));
1580 	temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
1581 	temp |= pll->state.hw_state.pll9;
1582 	I915_WRITE(BXT_PORT_PLL(phy, ch, 9), temp);
1583 
1584 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 10));
1585 	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
1586 	temp &= ~PORT_PLL_DCO_AMP_MASK;
1587 	temp |= pll->state.hw_state.pll10;
1588 	I915_WRITE(BXT_PORT_PLL(phy, ch, 10), temp);
1589 
1590 	/* Recalibrate with new settings */
1591 	temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
1592 	temp |= PORT_PLL_RECALIBRATE;
1593 	I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
1594 	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
1595 	temp |= pll->state.hw_state.ebb4;
1596 	I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
1597 
1598 	/* Enable PLL */
1599 	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1600 	temp |= PORT_PLL_ENABLE;
1601 	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1602 	POSTING_READ(BXT_PORT_PLL_ENABLE(port));
1603 
1604 	if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
1605 			200))
1606 		DRM_ERROR("PLL %d not locked\n", port);
1607 
1608 	if (IS_GEMINILAKE(dev_priv)) {
1609 		temp = I915_READ(BXT_PORT_TX_DW5_LN0(phy, ch));
1610 		temp |= DCC_DELAY_RANGE_2;
1611 		I915_WRITE(BXT_PORT_TX_DW5_GRP(phy, ch), temp);
1612 	}
1613 
1614 	/*
1615 	 * While we write to the group register to program all lanes at once, we
1616 	 * can only read back individual lane registers; we pick lanes 0/1 for that.
1617 	 */
1618 	temp = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
1619 	temp &= ~LANE_STAGGER_MASK;
1620 	temp &= ~LANESTAGGER_STRAP_OVRD;
1621 	temp |= pll->state.hw_state.pcsdw12;
1622 	I915_WRITE(BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
1623 }
1624 
1625 static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
1626 					struct intel_shared_dpll *pll)
1627 {
1628 	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
1629 	u32 temp;
1630 
1631 	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1632 	temp &= ~PORT_PLL_ENABLE;
1633 	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1634 	POSTING_READ(BXT_PORT_PLL_ENABLE(port));
1635 
1636 	if (IS_GEMINILAKE(dev_priv)) {
1637 		temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1638 		temp &= ~PORT_PLL_POWER_ENABLE;
1639 		I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1640 
1641 		if (wait_for_us(!(I915_READ(BXT_PORT_PLL_ENABLE(port)) &
1642 				PORT_PLL_POWER_STATE), 200))
1643 			DRM_ERROR("Power state not reset for PLL:%d\n", port);
1644 	}
1645 }
1646 
1647 static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
1648 					struct intel_shared_dpll *pll,
1649 					struct intel_dpll_hw_state *hw_state)
1650 {
1651 	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
1652 	intel_wakeref_t wakeref;
1653 	enum dpio_phy phy;
1654 	enum dpio_channel ch;
1655 	u32 val;
1656 	bool ret;
1657 
1658 	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
1659 
1660 	wakeref = intel_display_power_get_if_enabled(dev_priv,
1661 						     POWER_DOMAIN_DISPLAY_CORE);
1662 	if (!wakeref)
1663 		return false;
1664 
1665 	ret = false;
1666 
1667 	val = I915_READ(BXT_PORT_PLL_ENABLE(port));
1668 	if (!(val & PORT_PLL_ENABLE))
1669 		goto out;
1670 
1671 	hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
1672 	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
1673 
1674 	hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
1675 	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
1676 
1677 	hw_state->pll0 = I915_READ(BXT_PORT_PLL(phy, ch, 0));
1678 	hw_state->pll0 &= PORT_PLL_M2_MASK;
1679 
1680 	hw_state->pll1 = I915_READ(BXT_PORT_PLL(phy, ch, 1));
1681 	hw_state->pll1 &= PORT_PLL_N_MASK;
1682 
1683 	hw_state->pll2 = I915_READ(BXT_PORT_PLL(phy, ch, 2));
1684 	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
1685 
1686 	hw_state->pll3 = I915_READ(BXT_PORT_PLL(phy, ch, 3));
1687 	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
1688 
1689 	hw_state->pll6 = I915_READ(BXT_PORT_PLL(phy, ch, 6));
1690 	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
1691 			  PORT_PLL_INT_COEFF_MASK |
1692 			  PORT_PLL_GAIN_CTL_MASK;
1693 
1694 	hw_state->pll8 = I915_READ(BXT_PORT_PLL(phy, ch, 8));
1695 	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
1696 
1697 	hw_state->pll9 = I915_READ(BXT_PORT_PLL(phy, ch, 9));
1698 	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
1699 
1700 	hw_state->pll10 = I915_READ(BXT_PORT_PLL(phy, ch, 10));
1701 	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
1702 			   PORT_PLL_DCO_AMP_MASK;
1703 
1704 	/*
1705 	 * While we write to the group register to program all lanes at once, we
1706 	 * can read only lane registers. We configure all lanes the same way, so
1707 	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
1708 	 */
1709 	hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
1710 	if (I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
1711 		DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
1712 				 hw_state->pcsdw12,
1713 				 I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)));
1714 	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
1715 
1716 	ret = true;
1717 
1718 out:
1719 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1720 
1721 	return ret;
1722 }
1723 
1724 /* bxt clock parameters */
1725 struct bxt_clk_div {
1726 	int clock;
1727 	u32 p1;
1728 	u32 p2;
1729 	u32 m2_int;
1730 	u32 m2_frac;
1731 	bool m2_frac_en;
1732 	u32 n;
1733 
1734 	int vco;
1735 };
1736 
1737 /* pre-calculated values for DP link rates */
1738 static const struct bxt_clk_div bxt_dp_clk_val[] = {
1739 	{162000, 4, 2, 32, 1677722, 1, 1},
1740 	{270000, 4, 1, 27,       0, 0, 1},
1741 	{540000, 2, 1, 27,       0, 0, 1},
1742 	{216000, 3, 2, 32, 1677722, 1, 1},
1743 	{243000, 4, 1, 24, 1258291, 1, 1},
1744 	{324000, 4, 1, 32, 1677722, 1, 1},
1745 	{432000, 3, 1, 32, 1677722, 1, 1}
1746 };
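/*
 * The m2_int/m2_frac pairs above are the integer and 22-bit fractional parts
 * of m2 (cf. the m2 split in bxt_ddi_hdmi_pll_dividers()); e.g. the 162000
 * entry encodes m2 = 32 + 1677722 / 4194304 ~= 32.4.
 */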
1747 
1748 static bool
1749 bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
1750 			  struct bxt_clk_div *clk_div)
1751 {
1752 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1753 	struct dpll best_clock;
1754 
1755 	/* Calculate HDMI div */
1756 	/*
1757 	 * FIXME: tie the following calculation into
1758 	 * i9xx_crtc_compute_clock
1759 	 */
1760 	if (!bxt_find_best_dpll(crtc_state, &best_clock)) {
1761 		DRM_DEBUG_DRIVER("no PLL dividers found for clock %d pipe %c\n",
1762 				 crtc_state->port_clock,
1763 				 pipe_name(crtc->pipe));
1764 		return false;
1765 	}
1766 
1767 	clk_div->p1 = best_clock.p1;
1768 	clk_div->p2 = best_clock.p2;
1769 	WARN_ON(best_clock.m1 != 2);
1770 	clk_div->n = best_clock.n;
1771 	clk_div->m2_int = best_clock.m2 >> 22;
1772 	clk_div->m2_frac = best_clock.m2 & ((1 << 22) - 1);
1773 	clk_div->m2_frac_en = clk_div->m2_frac != 0;
1774 
1775 	clk_div->vco = best_clock.vco;
1776 
1777 	return true;
1778 }
1779 
1780 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
1781 				    struct bxt_clk_div *clk_div)
1782 {
1783 	int clock = crtc_state->port_clock;
1784 	int i;
1785 
1786 	*clk_div = bxt_dp_clk_val[0];
1787 	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
1788 		if (bxt_dp_clk_val[i].clock == clock) {
1789 			*clk_div = bxt_dp_clk_val[i];
1790 			break;
1791 		}
1792 	}
1793 
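	/*
	 * vco = link clock (kHz) * 5 * p1 * p2; e.g. the 270000 entry yields
	 * 270000 * 10 / 2 * 4 * 1 = 5400000 kHz, i.e. the vco == 5400000 case
	 * in bxt_ddi_set_dpll_hw_state().
	 */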
1794 	clk_div->vco = clock * 10 / 2 * clk_div->p1 * clk_div->p2;
1795 }
1796 
1797 static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
1798 				      const struct bxt_clk_div *clk_div)
1799 {
1800 	struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
1801 	int clock = crtc_state->port_clock;
1802 	int vco = clk_div->vco;
1803 	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
1804 	u32 lanestagger;
1805 
1806 	memset(dpll_hw_state, 0, sizeof(*dpll_hw_state));
1807 
1808 	if (vco >= 6200000 && vco <= 6700000) {
1809 		prop_coef = 4;
1810 		int_coef = 9;
1811 		gain_ctl = 3;
1812 		targ_cnt = 8;
1813 	} else if ((vco > 5400000 && vco < 6200000) ||
1814 			(vco >= 4800000 && vco < 5400000)) {
1815 		prop_coef = 5;
1816 		int_coef = 11;
1817 		gain_ctl = 3;
1818 		targ_cnt = 9;
1819 	} else if (vco == 5400000) {
1820 		prop_coef = 3;
1821 		int_coef = 8;
1822 		gain_ctl = 1;
1823 		targ_cnt = 9;
1824 	} else {
1825 		DRM_ERROR("Invalid VCO\n");
1826 		return false;
1827 	}
1828 
1829 	if (clock > 270000)
1830 		lanestagger = 0x18;
1831 	else if (clock > 135000)
1832 		lanestagger = 0x0d;
1833 	else if (clock > 67000)
1834 		lanestagger = 0x07;
1835 	else if (clock > 33000)
1836 		lanestagger = 0x04;
1837 	else
1838 		lanestagger = 0x02;
1839 
1840 	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
1841 	dpll_hw_state->pll0 = clk_div->m2_int;
1842 	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
1843 	dpll_hw_state->pll2 = clk_div->m2_frac;
1844 
1845 	if (clk_div->m2_frac_en)
1846 		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
1847 
1848 	dpll_hw_state->pll6 = prop_coef | PORT_PLL_INT_COEFF(int_coef);
1849 	dpll_hw_state->pll6 |= PORT_PLL_GAIN_CTL(gain_ctl);
1850 
1851 	dpll_hw_state->pll8 = targ_cnt;
1852 
1853 	dpll_hw_state->pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;
1854 
1855 	dpll_hw_state->pll10 =
1856 		PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
1857 		| PORT_PLL_DCO_AMP_OVR_EN_H;
1858 
1859 	dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
1860 
1861 	dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
1862 
1863 	return true;
1864 }
1865 
1866 static bool
1867 bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1868 {
1869 	struct bxt_clk_div clk_div = {};
1870 
1871 	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
1872 
1873 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
1874 }
1875 
1876 static bool
1877 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1878 {
1879 	struct bxt_clk_div clk_div = {};
1880 
1881 	bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
1882 
1883 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
1884 }
1885 
1886 static bool bxt_get_dpll(struct intel_atomic_state *state,
1887 			 struct intel_crtc *crtc,
1888 			 struct intel_encoder *encoder)
1889 {
1890 	struct intel_crtc_state *crtc_state =
1891 		intel_atomic_get_new_crtc_state(state, crtc);
1892 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1893 	struct intel_shared_dpll *pll;
1894 	enum intel_dpll_id id;
1895 
1896 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
1897 	    !bxt_ddi_hdmi_set_dpll_hw_state(crtc_state))
1898 		return false;
1899 
1900 	if (intel_crtc_has_dp_encoder(crtc_state) &&
1901 	    !bxt_ddi_dp_set_dpll_hw_state(crtc_state))
1902 		return false;
1903 
1904 	/* 1:1 mapping between ports and PLLs */
1905 	id = (enum intel_dpll_id) encoder->port;
1906 	pll = intel_get_shared_dpll_by_id(dev_priv, id);
1907 
1908 	DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
1909 		      crtc->base.base.id, crtc->base.name, pll->info->name);
1910 
1911 	intel_reference_shared_dpll(state, crtc,
1912 				    pll, &crtc_state->dpll_hw_state);
1913 
1914 	crtc_state->shared_dpll = pll;
1915 
1916 	return true;
1917 }
1918 
1919 static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
1920 			      const struct intel_dpll_hw_state *hw_state)
1921 {
1922 	DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x, "
1923 		      "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
1924 		      "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
1925 		      hw_state->ebb0,
1926 		      hw_state->ebb4,
1927 		      hw_state->pll0,
1928 		      hw_state->pll1,
1929 		      hw_state->pll2,
1930 		      hw_state->pll3,
1931 		      hw_state->pll6,
1932 		      hw_state->pll8,
1933 		      hw_state->pll9,
1934 		      hw_state->pll10,
1935 		      hw_state->pcsdw12);
1936 }
1937 
1938 static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
1939 	.enable = bxt_ddi_pll_enable,
1940 	.disable = bxt_ddi_pll_disable,
1941 	.get_hw_state = bxt_ddi_pll_get_hw_state,
1942 };
1943 
1944 struct intel_dpll_mgr {
1945 	const struct dpll_info *dpll_info;
1946 
1947 	bool (*get_dplls)(struct intel_atomic_state *state,
1948 			  struct intel_crtc *crtc,
1949 			  struct intel_encoder *encoder);
1950 	void (*put_dplls)(struct intel_atomic_state *state,
1951 			  struct intel_crtc *crtc);
1952 	void (*update_active_dpll)(struct intel_atomic_state *state,
1953 				   struct intel_crtc *crtc,
1954 				   struct intel_encoder *encoder);
1955 	void (*dump_hw_state)(struct drm_i915_private *dev_priv,
1956 			      const struct intel_dpll_hw_state *hw_state);
1957 };
1958 
1959 static const struct dpll_info pch_plls[] = {
1960 	{ "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
1961 	{ "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
1962 	{ },
1963 };
1964 
1965 static const struct intel_dpll_mgr pch_pll_mgr = {
1966 	.dpll_info = pch_plls,
1967 	.get_dplls = ibx_get_dpll,
1968 	.put_dplls = intel_put_dpll,
1969 	.dump_hw_state = ibx_dump_hw_state,
1970 };
1971 
1972 static const struct dpll_info hsw_plls[] = {
1973 	{ "WRPLL 1",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1,     0 },
1974 	{ "WRPLL 2",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2,     0 },
1975 	{ "SPLL",       &hsw_ddi_spll_funcs,  DPLL_ID_SPLL,       0 },
1976 	{ "LCPLL 810",  &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810,  INTEL_DPLL_ALWAYS_ON },
1977 	{ "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
1978 	{ "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
1979 	{ },
1980 };
1981 
1982 static const struct intel_dpll_mgr hsw_pll_mgr = {
1983 	.dpll_info = hsw_plls,
1984 	.get_dplls = hsw_get_dpll,
1985 	.put_dplls = intel_put_dpll,
1986 	.dump_hw_state = hsw_dump_hw_state,
1987 };
1988 
1989 static const struct dpll_info skl_plls[] = {
1990 	{ "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
1991 	{ "DPLL 1", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL1, 0 },
1992 	{ "DPLL 2", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL2, 0 },
1993 	{ "DPLL 3", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL3, 0 },
1994 	{ },
1995 };
1996 
1997 static const struct intel_dpll_mgr skl_pll_mgr = {
1998 	.dpll_info = skl_plls,
1999 	.get_dplls = skl_get_dpll,
2000 	.put_dplls = intel_put_dpll,
2001 	.dump_hw_state = skl_dump_hw_state,
2002 };
2003 
2004 static const struct dpll_info bxt_plls[] = {
2005 	{ "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
2006 	{ "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
2007 	{ "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
2008 	{ },
2009 };
2010 
2011 static const struct intel_dpll_mgr bxt_pll_mgr = {
2012 	.dpll_info = bxt_plls,
2013 	.get_dplls = bxt_get_dpll,
2014 	.put_dplls = intel_put_dpll,
2015 	.dump_hw_state = bxt_dump_hw_state,
2016 };
2017 
2018 static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
2019 			       struct intel_shared_dpll *pll)
2020 {
2021 	const enum intel_dpll_id id = pll->info->id;
2022 	u32 val;
2023 
2024 	/* 1. Enable DPLL power in DPLL_ENABLE. */
2025 	val = I915_READ(CNL_DPLL_ENABLE(id));
2026 	val |= PLL_POWER_ENABLE;
2027 	I915_WRITE(CNL_DPLL_ENABLE(id), val);
2028 
2029 	/* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */
2030 	if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id),
2031 				  PLL_POWER_STATE, 5))
2032 		DRM_ERROR("PLL %d Power not enabled\n", id);
2033 
2034 	/*
2035 	 * 3. Configure DPLL_CFGCR0 to set SSC enable/disable,
2036 	 * select DP mode, and set DP link rate.
2037 	 */
2038 	val = pll->state.hw_state.cfgcr0;
2039 	I915_WRITE(CNL_DPLL_CFGCR0(id), val);
2040 
2041 	/* 4. Read back to ensure writes completed */
2042 	POSTING_READ(CNL_DPLL_CFGCR0(id));
2043 
2044 	/* 3. Configure DPLL_CFGCR1 */
2045 	/* Avoid touching CFGCR1 if HDMI mode is not enabled */
2046 	if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
2047 		val = pll->state.hw_state.cfgcr1;
2048 		I915_WRITE(CNL_DPLL_CFGCR1(id), val);
2049 		/* 4. Read back to ensure writes completed */
2050 		POSTING_READ(CNL_DPLL_CFGCR1(id));
2051 	}
2052 
2053 	/*
2054 	 * 5. If the frequency will result in a change to the voltage
2055 	 * requirement, follow the Display Voltage Frequency Switching
2056 	 * Sequence Before Frequency Change
2057 	 *
2058 	 * Note: DVFS is actually handled via the cdclk code paths,
2059 	 * hence we do nothing here.
2060 	 */
2061 
2062 	/* 6. Enable DPLL in DPLL_ENABLE. */
2063 	val = I915_READ(CNL_DPLL_ENABLE(id));
2064 	val |= PLL_ENABLE;
2065 	I915_WRITE(CNL_DPLL_ENABLE(id), val);
2066 
2067 	/* 7. Wait for PLL lock status in DPLL_ENABLE. */
2068 	if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
2069 		DRM_ERROR("PLL %d not locked\n", id);
2070 
2071 	/*
2072 	 * 8. If the frequency will result in a change to the voltage
2073 	 * requirement, follow the Display Voltage Frequency Switching
2074 	 * Sequence After Frequency Change
2075 	 *
2076 	 * Note: DVFS is actually handled via the cdclk code paths,
2077 	 * hence we do nothing here.
2078 	 */
2079 
2080 	/*
2081 	 * 9. turn on the clock for the DDI and map the DPLL to the DDI
2082 	 * Done at intel_ddi_clk_select
2083 	 */
2084 }
2085 
2086 static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
2087 				struct intel_shared_dpll *pll)
2088 {
2089 	const enum intel_dpll_id id = pll->info->id;
2090 	u32 val;
2091 
2092 	/*
2093 	 * 1. Configure DPCLKA_CFGCR0 to turn off the clock for the DDI.
2094 	 * Done at intel_ddi_post_disable
2095 	 */
2096 
2097 	/*
2098 	 * 2. If the frequency will result in a change to the voltage
2099 	 * requirement, follow the Display Voltage Frequency Switching
2100 	 * Sequence Before Frequency Change
2101 	 *
2102 	 * Note: DVFS is actually handled via the cdclk code paths,
2103 	 * hence we do nothing here.
2104 	 */
2105 
2106 	/* 3. Disable DPLL through DPLL_ENABLE. */
2107 	val = I915_READ(CNL_DPLL_ENABLE(id));
2108 	val &= ~PLL_ENABLE;
2109 	I915_WRITE(CNL_DPLL_ENABLE(id), val);
2110 
2111 	/* 4. Wait for PLL not locked status in DPLL_ENABLE. */
2112 	if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
2113 		DRM_ERROR("PLL %d locked\n", id);
2114 
2115 	/*
2116 	 * 5. If the frequency will result in a change to the voltage
2117 	 * requirement, follow the Display Voltage Frequency Switching
2118 	 * Sequence After Frequency Change
2119 	 *
2120 	 * Note: DVFS is actually handled via the cdclk code paths,
2121 	 * hence we do nothing here.
2122 	 */
2123 
2124 	/* 6. Disable DPLL power in DPLL_ENABLE. */
2125 	val = I915_READ(CNL_DPLL_ENABLE(id));
2126 	val &= ~PLL_POWER_ENABLE;
2127 	I915_WRITE(CNL_DPLL_ENABLE(id), val);
2128 
2129 	/* 7. Wait for DPLL power state disabled in DPLL_ENABLE. */
2130 	if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id),
2131 				    PLL_POWER_STATE, 5))
2132 		DRM_ERROR("PLL %d Power not disabled\n", id);
2133 }
2134 
2135 static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
2136 				     struct intel_shared_dpll *pll,
2137 				     struct intel_dpll_hw_state *hw_state)
2138 {
2139 	const enum intel_dpll_id id = pll->info->id;
2140 	intel_wakeref_t wakeref;
2141 	u32 val;
2142 	bool ret;
2143 
2144 	wakeref = intel_display_power_get_if_enabled(dev_priv,
2145 						     POWER_DOMAIN_DISPLAY_CORE);
2146 	if (!wakeref)
2147 		return false;
2148 
2149 	ret = false;
2150 
2151 	val = I915_READ(CNL_DPLL_ENABLE(id));
2152 	if (!(val & PLL_ENABLE))
2153 		goto out;
2154 
2155 	val = I915_READ(CNL_DPLL_CFGCR0(id));
2156 	hw_state->cfgcr0 = val;
2157 
2158 	/* avoid reading back stale values if HDMI mode is not enabled */
2159 	if (val & DPLL_CFGCR0_HDMI_MODE)
2160 		hw_state->cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(id));
2161 
2162 	ret = true;
2163 
2164 out:
2165 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
2166 
2167 	return ret;
2168 }
2169 
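     /*
      * Split the overall DCO divider into the P, Q and K post dividers so that
      * pdiv * qdiv * kdiv == bestdiv; e.g. bestdiv == 20 gives pdiv == 2,
      * qdiv == 5, kdiv == 2, while bestdiv == 15 gives pdiv == 5, qdiv == 1,
      * kdiv == 3.
      */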
2170 static void cnl_wrpll_get_multipliers(int bestdiv, int *pdiv,
2171 				      int *qdiv, int *kdiv)
2172 {
2173 	/* even dividers */
2174 	if (bestdiv % 2 == 0) {
2175 		if (bestdiv == 2) {
2176 			*pdiv = 2;
2177 			*qdiv = 1;
2178 			*kdiv = 1;
2179 		} else if (bestdiv % 4 == 0) {
2180 			*pdiv = 2;
2181 			*qdiv = bestdiv / 4;
2182 			*kdiv = 2;
2183 		} else if (bestdiv % 6 == 0) {
2184 			*pdiv = 3;
2185 			*qdiv = bestdiv / 6;
2186 			*kdiv = 2;
2187 		} else if (bestdiv % 5 == 0) {
2188 			*pdiv = 5;
2189 			*qdiv = bestdiv / 10;
2190 			*kdiv = 2;
2191 		} else if (bestdiv % 14 == 0) {
2192 			*pdiv = 7;
2193 			*qdiv = bestdiv / 14;
2194 			*kdiv = 2;
2195 		}
2196 	} else {
2197 		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
2198 			*pdiv = bestdiv;
2199 			*qdiv = 1;
2200 			*kdiv = 1;
2201 		} else { /* 9, 15, 21 */
2202 			*pdiv = bestdiv / 3;
2203 			*qdiv = 1;
2204 			*kdiv = 3;
2205 		}
2206 	}
2207 }
2208 
2209 static void cnl_wrpll_params_populate(struct skl_wrpll_params *params,
2210 				      u32 dco_freq, u32 ref_freq,
2211 				      int pdiv, int qdiv, int kdiv)
2212 {
2213 	u32 dco;
2214 
2215 	switch (kdiv) {
2216 	case 1:
2217 		params->kdiv = 1;
2218 		break;
2219 	case 2:
2220 		params->kdiv = 2;
2221 		break;
2222 	case 3:
2223 		params->kdiv = 4;
2224 		break;
2225 	default:
2226 		WARN(1, "Incorrect KDiv\n");
2227 	}
2228 
2229 	switch (pdiv) {
2230 	case 2:
2231 		params->pdiv = 1;
2232 		break;
2233 	case 3:
2234 		params->pdiv = 2;
2235 		break;
2236 	case 5:
2237 		params->pdiv = 4;
2238 		break;
2239 	case 7:
2240 		params->pdiv = 8;
2241 		break;
2242 	default:
2243 		WARN(1, "Incorrect PDiv\n");
2244 	}
2245 
2246 	WARN_ON(kdiv != 2 && qdiv != 1);
2247 
2248 	params->qdiv_ratio = qdiv;
2249 	params->qdiv_mode = (qdiv == 1) ? 0 : 1;
2250 
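     	/*
     	 * The DCO is programmed as a fixed point multiple of the reference
     	 * with 15 fractional bits; e.g. dco_freq == 8100000 kHz and
     	 * ref_freq == 24000 kHz give a ratio of 337.5, i.e.
     	 * dco_integer == 0x151 and dco_fraction == 0x4000.
     	 */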
2251 	dco = div_u64((u64)dco_freq << 15, ref_freq);
2252 
2253 	params->dco_integer = dco >> 15;
2254 	params->dco_fraction = dco & 0x7fff;
2255 }
2256 
2257 int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv)
2258 {
2259 	int ref_clock = dev_priv->cdclk.hw.ref;
2260 
2261 	/*
2262 	 * For ICL+, the spec states: if reference frequency is 38.4,
2263 	 * use 19.2 because the DPLL automatically divides that by 2.
2264 	 */
2265 	if (INTEL_GEN(dev_priv) >= 11 && ref_clock == 38400)
2266 		ref_clock = 19200;
2267 
2268 	return ref_clock;
2269 }
2270 
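     /*
      * Pick the DCO divider that puts the DCO (afe_clock * divider) closest to
      * the middle of the allowed 7998000-10000000 kHz range; e.g. a 594000 kHz
      * HDMI port clock gives afe_clock == 2970000 kHz, for which only
      * divider 3 lands in range (dco == 8910000 kHz), resulting in pdiv == 3,
      * qdiv == 1, kdiv == 1.
      */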
2271 static bool
2272 cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
2273 			struct skl_wrpll_params *wrpll_params)
2274 {
2275 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
2276 	u32 afe_clock = crtc_state->port_clock * 5;
2277 	u32 ref_clock;
2278 	u32 dco_min = 7998000;
2279 	u32 dco_max = 10000000;
2280 	u32 dco_mid = (dco_min + dco_max) / 2;
2281 	static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
2282 					 18, 20, 24, 28, 30, 32,  36,  40,
2283 					 42, 44, 48, 50, 52, 54,  56,  60,
2284 					 64, 66, 68, 70, 72, 76,  78,  80,
2285 					 84, 88, 90, 92, 96, 98, 100, 102,
2286 					  3,  5,  7,  9, 15, 21 };
2287 	u32 dco, best_dco = 0, dco_centrality = 0;
2288 	u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2289 	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2290 
2291 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2292 		dco = afe_clock * dividers[d];
2293 
2294 		if ((dco <= dco_max) && (dco >= dco_min)) {
2295 			dco_centrality = abs(dco - dco_mid);
2296 
2297 			if (dco_centrality < best_dco_centrality) {
2298 				best_dco_centrality = dco_centrality;
2299 				best_div = dividers[d];
2300 				best_dco = dco;
2301 			}
2302 		}
2303 	}
2304 
2305 	if (best_div == 0)
2306 		return false;
2307 
2308 	cnl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2309 
2310 	ref_clock = cnl_hdmi_pll_ref_clock(dev_priv);
2311 
2312 	cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
2313 				  pdiv, qdiv, kdiv);
2314 
2315 	return true;
2316 }
2317 
2318 static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
2319 {
2320 	u32 cfgcr0, cfgcr1;
2321 	struct skl_wrpll_params wrpll_params = { 0, };
2322 
2323 	cfgcr0 = DPLL_CFGCR0_HDMI_MODE;
2324 
2325 	if (!cnl_ddi_calculate_wrpll(crtc_state, &wrpll_params))
2326 		return false;
2327 
2328 	cfgcr0 |= DPLL_CFGCR0_DCO_FRACTION(wrpll_params.dco_fraction) |
2329 		wrpll_params.dco_integer;
2330 
2331 	cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(wrpll_params.qdiv_ratio) |
2332 		DPLL_CFGCR1_QDIV_MODE(wrpll_params.qdiv_mode) |
2333 		DPLL_CFGCR1_KDIV(wrpll_params.kdiv) |
2334 		DPLL_CFGCR1_PDIV(wrpll_params.pdiv) |
2335 		DPLL_CFGCR1_CENTRAL_FREQ;
2336 
2337 	memset(&crtc_state->dpll_hw_state, 0,
2338 	       sizeof(crtc_state->dpll_hw_state));
2339 
2340 	crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
2341 	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
2342 	return true;
2343 }
2344 
2345 static bool
2346 cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2347 {
2348 	u32 cfgcr0;
2349 
2350 	cfgcr0 = DPLL_CFGCR0_SSC_ENABLE;
2351 
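     	/*
     	 * port_clock is the link symbol clock in kHz; the cases below are
     	 * keyed on half of that, and the link rate encodings are named after
     	 * half the bit rate in MHz, so e.g. HBR2 (port_clock == 540000) takes
     	 * the 270000 case and selects DPLL_CFGCR0_LINK_RATE_2700.
     	 */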
2352 	switch (crtc_state->port_clock / 2) {
2353 	case 81000:
2354 		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_810;
2355 		break;
2356 	case 135000:
2357 		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1350;
2358 		break;
2359 	case 270000:
2360 		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2700;
2361 		break;
2362 		/* eDP 1.4 rates */
2363 	case 162000:
2364 		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1620;
2365 		break;
2366 	case 108000:
2367 		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1080;
2368 		break;
2369 	case 216000:
2370 		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2160;
2371 		break;
2372 	case 324000:
2373 		/* Some SKUs may require elevated I/O voltage to support this */
2374 		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_3240;
2375 		break;
2376 	case 405000:
2377 		/* Some SKUs may require elevated I/O voltage to support this */
2378 		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_4050;
2379 		break;
2380 	}
2381 
2382 	memset(&crtc_state->dpll_hw_state, 0,
2383 	       sizeof(crtc_state->dpll_hw_state));
2384 
2385 	crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
2386 
2387 	return true;
2388 }
2389 
2390 static bool cnl_get_dpll(struct intel_atomic_state *state,
2391 			 struct intel_crtc *crtc,
2392 			 struct intel_encoder *encoder)
2393 {
2394 	struct intel_crtc_state *crtc_state =
2395 		intel_atomic_get_new_crtc_state(state, crtc);
2396 	struct intel_shared_dpll *pll;
2397 	bool bret;
2398 
2399 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
2400 		bret = cnl_ddi_hdmi_pll_dividers(crtc_state);
2401 		if (!bret) {
2402 			DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
2403 			return false;
2404 		}
2405 	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
2406 		bret = cnl_ddi_dp_set_dpll_hw_state(crtc_state);
2407 		if (!bret) {
2408 			DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
2409 			return false;
2410 		}
2411 	} else {
2412 		DRM_DEBUG_KMS("Skip DPLL setup for output_types 0x%x\n",
2413 			      crtc_state->output_types);
2414 		return false;
2415 	}
2416 
2417 	pll = intel_find_shared_dpll(state, crtc,
2418 				     &crtc_state->dpll_hw_state,
2419 				     DPLL_ID_SKL_DPLL0,
2420 				     DPLL_ID_SKL_DPLL2);
2421 	if (!pll) {
2422 		DRM_DEBUG_KMS("No PLL selected\n");
2423 		return false;
2424 	}
2425 
2426 	intel_reference_shared_dpll(state, crtc,
2427 				    pll, &crtc_state->dpll_hw_state);
2428 
2429 	crtc_state->shared_dpll = pll;
2430 
2431 	return true;
2432 }
2433 
2434 static void cnl_dump_hw_state(struct drm_i915_private *dev_priv,
2435 			      const struct intel_dpll_hw_state *hw_state)
2436 {
2437 	DRM_DEBUG_KMS("dpll_hw_state: "
2438 		      "cfgcr0: 0x%x, cfgcr1: 0x%x\n",
2439 		      hw_state->cfgcr0,
2440 		      hw_state->cfgcr1);
2441 }
2442 
2443 static const struct intel_shared_dpll_funcs cnl_ddi_pll_funcs = {
2444 	.enable = cnl_ddi_pll_enable,
2445 	.disable = cnl_ddi_pll_disable,
2446 	.get_hw_state = cnl_ddi_pll_get_hw_state,
2447 };
2448 
2449 static const struct dpll_info cnl_plls[] = {
2450 	{ "DPLL 0", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
2451 	{ "DPLL 1", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
2452 	{ "DPLL 2", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
2453 	{ },
2454 };
2455 
2456 static const struct intel_dpll_mgr cnl_pll_mgr = {
2457 	.dpll_info = cnl_plls,
2458 	.get_dplls = cnl_get_dpll,
2459 	.put_dplls = intel_put_dpll,
2460 	.dump_hw_state = cnl_dump_hw_state,
2461 };
2462 
2463 struct icl_combo_pll_params {
2464 	int clock;
2465 	struct skl_wrpll_params wrpll;
2466 };
2467 
2468 /*
2469  * These values are already adjusted: they're the bits we write to the
2470  * registers, not the logical values.
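      *
      * As a cross-check against cnl_ddi_calculate_wrpll(): for the 540000 kHz
      * entry below, DCO == 24000 * (0x151 + 0x4000 / 2^15) == 8100000 kHz, and
      * 8100000 / (pdiv 3 * kdiv 1, qdiv bypassed) == 2700000 kHz, i.e. the
      * AFE clock (5 * port_clock) of a 5.4 GHz link.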
2471  */
2472 static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
2473 	{ 540000,
2474 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
2475 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2476 	{ 270000,
2477 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
2478 	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2479 	{ 162000,
2480 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
2481 	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2482 	{ 324000,
2483 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
2484 	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2485 	{ 216000,
2486 	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2487 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2488 	{ 432000,
2489 	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2490 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2491 	{ 648000,
2492 	  { .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
2493 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2494 	{ 810000,
2495 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
2496 	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2497 };
2498 
2499 
2500 /* Also used for 38.4 MHz values. */
2501 static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
2502 	{ 540000,
2503 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
2504 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2505 	{ 270000,
2506 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
2507 	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2508 	{ 162000,
2509 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
2510 	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2511 	{ 324000,
2512 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
2513 	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2514 	{ 216000,
2515 	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2516 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2517 	{ 432000,
2518 	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2519 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2520 	{ 648000,
2521 	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
2522 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2523 	{ 810000,
2524 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
2525 	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2526 };
2527 
2528 static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
2529 	.dco_integer = 0x151, .dco_fraction = 0x4000,
2530 	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2531 };
2532 
2533 static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
2534 	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
2535 	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2536 };
2537 
2538 static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2539 				  struct skl_wrpll_params *pll_params)
2540 {
2541 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
2542 	const struct icl_combo_pll_params *params =
2543 		dev_priv->cdclk.hw.ref == 24000 ?
2544 		icl_dp_combo_pll_24MHz_values :
2545 		icl_dp_combo_pll_19_2MHz_values;
2546 	int clock = crtc_state->port_clock;
2547 	int i;
2548 
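     	/*
     	 * Both tables have the same number of entries, so sizing the loop on
     	 * the 24 MHz table also covers the 19.2/38.4 MHz one.
     	 */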
2549 	for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
2550 		if (clock == params[i].clock) {
2551 			*pll_params = params[i].wrpll;
2552 			return true;
2553 		}
2554 	}
2555 
2556 	MISSING_CASE(clock);
2557 	return false;
2558 }
2559 
2560 static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
2561 			     struct skl_wrpll_params *pll_params)
2562 {
2563 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
2564 
2565 	*pll_params = dev_priv->cdclk.hw.ref == 24000 ?
2566 			icl_tbt_pll_24MHz_values : icl_tbt_pll_19_2MHz_values;
2567 	return true;
2568 }
2569 
2570 static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
2571 				struct intel_encoder *encoder,
2572 				struct intel_dpll_hw_state *pll_state)
2573 {
2574 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
2575 	u32 cfgcr0, cfgcr1;
2576 	struct skl_wrpll_params pll_params = { 0 };
2577 	bool ret;
2578 
2579 	if (intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv,
2580 							encoder->port)))
2581 		ret = icl_calc_tbt_pll(crtc_state, &pll_params);
2582 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
2583 		 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
2584 		ret = cnl_ddi_calculate_wrpll(crtc_state, &pll_params);
2585 	else
2586 		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
2587 
2588 	if (!ret)
2589 		return false;
2590 
2591 	cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(pll_params.dco_fraction) |
2592 		 pll_params.dco_integer;
2593 
2594 	cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params.qdiv_ratio) |
2595 		 DPLL_CFGCR1_QDIV_MODE(pll_params.qdiv_mode) |
2596 		 DPLL_CFGCR1_KDIV(pll_params.kdiv) |
2597 		 DPLL_CFGCR1_PDIV(pll_params.pdiv);
2598 
2599 	if (INTEL_GEN(dev_priv) >= 12)
2600 		cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
2601 	else
2602 		cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
2603 
2604 	memset(pll_state, 0, sizeof(*pll_state));
2605 
2606 	pll_state->cfgcr0 = cfgcr0;
2607 	pll_state->cfgcr1 = cfgcr1;
2608 
2609 	return true;
2610 }
2611 
2612 
2613 static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
2614 {
2615 	return id - DPLL_ID_ICL_MGPLL1;
2616 }
2617 
2618 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
2619 {
2620 	return tc_port + DPLL_ID_ICL_MGPLL1;
2621 }
2622 
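     /*
      * The DCO candidates are div1 * div2 * clock_khz * 5; e.g. a 270000 kHz
      * DP link needs a DCO of exactly 8100000 kHz, which the search below
      * finds with div1 == 3, div2 == 2.
      */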
2623 static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
2624 				     u32 *target_dco_khz,
2625 				     struct intel_dpll_hw_state *state)
2626 {
2627 	u32 dco_min_freq, dco_max_freq;
2628 	static const int div1_vals[] = {7, 5, 3, 2};
2629 	unsigned int i;
2630 	int div2;
2631 
2632 	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
2633 	dco_max_freq = is_dp ? 8100000 : 10000000;
2634 
2635 	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
2636 		int div1 = div1_vals[i];
2637 
2638 		for (div2 = 10; div2 > 0; div2--) {
2639 			int dco = div1 * div2 * clock_khz * 5;
2640 			int a_divratio, tlinedrv, inputsel;
2641 			u32 hsdiv;
2642 
2643 			if (dco < dco_min_freq || dco > dco_max_freq)
2644 				continue;
2645 
2646 			if (div2 >= 2) {
2647 				a_divratio = is_dp ? 10 : 5;
2648 				tlinedrv = 2;
2649 			} else {
2650 				a_divratio = 5;
2651 				tlinedrv = 0;
2652 			}
2653 			inputsel = is_dp ? 0 : 1;
2654 
2655 			switch (div1) {
2656 			default:
2657 				MISSING_CASE(div1);
2658 				/* fall through */
2659 			case 2:
2660 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
2661 				break;
2662 			case 3:
2663 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
2664 				break;
2665 			case 5:
2666 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
2667 				break;
2668 			case 7:
2669 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
2670 				break;
2671 			}
2672 
2673 			*target_dco_khz = dco;
2674 
2675 			state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
2676 
2677 			state->mg_clktop2_coreclkctl1 =
2678 				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
2679 
2680 			state->mg_clktop2_hsclkctl =
2681 				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
2682 				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
2683 				hsdiv |
2684 				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
2685 
2686 			return true;
2687 		}
2688 	}
2689 
2690 	return false;
2691 }
2692 
2693 /*
2694  * The specification for this function uses real numbers, so the math had to be
2695  * adapted to integer-only calculation, which is why it looks so different.
2696  */
2697 static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
2698 				  struct intel_dpll_hw_state *pll_state)
2699 {
2700 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
2701 	int refclk_khz = dev_priv->cdclk.hw.ref;
2702 	int clock = crtc_state->port_clock;
2703 	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
2704 	u32 iref_ndiv, iref_trim, iref_pulse_w;
2705 	u32 prop_coeff, int_coeff;
2706 	u32 tdc_targetcnt, feedfwgain;
2707 	u64 ssc_stepsize, ssc_steplen, ssc_steplog;
2708 	u64 tmp;
2709 	bool use_ssc = false;
2710 	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
2711 
2712 	memset(pll_state, 0, sizeof(*pll_state));
2713 
2714 	if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
2715 				      pll_state)) {
2716 		DRM_DEBUG_KMS("Failed to find divisors for clock %d\n", clock);
2717 		return false;
2718 	}
2719 
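     	/*
     	 * The feedback ratio is m1div * (m2div_int + m2div_frac / 2^22); e.g.
     	 * refclk_khz == 24000 and dco_khz == 8100000 give m1div == 2,
     	 * m2div_int == 8100000 / 48000 == 168, m2div_rem == 36000 and
     	 * m2div_frac == 36000 * 2^22 / 48000 == 0x300000 (0.75).
     	 */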
2720 	m1div = 2;
2721 	m2div_int = dco_khz / (refclk_khz * m1div);
2722 	if (m2div_int > 255) {
2723 		m1div = 4;
2724 		m2div_int = dco_khz / (refclk_khz * m1div);
2725 		if (m2div_int > 255) {
2726 			DRM_DEBUG_KMS("Failed to find mdiv for clock %d\n",
2727 				      clock);
2728 			return false;
2729 		}
2730 	}
2731 	m2div_rem = dco_khz % (refclk_khz * m1div);
2732 
2733 	tmp = (u64)m2div_rem * (1 << 22);
2734 	do_div(tmp, refclk_khz * m1div);
2735 	m2div_frac = tmp;
2736 
2737 	switch (refclk_khz) {
2738 	case 19200:
2739 		iref_ndiv = 1;
2740 		iref_trim = 28;
2741 		iref_pulse_w = 1;
2742 		break;
2743 	case 24000:
2744 		iref_ndiv = 1;
2745 		iref_trim = 25;
2746 		iref_pulse_w = 2;
2747 		break;
2748 	case 38400:
2749 		iref_ndiv = 2;
2750 		iref_trim = 28;
2751 		iref_pulse_w = 1;
2752 		break;
2753 	default:
2754 		MISSING_CASE(refclk_khz);
2755 		return false;
2756 	}
2757 
2758 	/*
2759 	 * tdc_res = 0.000003
2760 	 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
2761 	 *
2762 	 * The multiplication by 1000 is due to the refclk MHz to kHz conversion. It
2763 	 * was supposed to be a division, but we rearranged the operations of
2764 	 * the formula to avoid early divisions so we don't multiply the
2765 	 * rounding errors.
2766 	 *
2767 	 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
2768 	 * we also rearrange to work with integers.
2769 	 *
2770 	 * The 0.5 transformed to 5 results in a multiplication by 10 and the
2771 	 * last division by 10.
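     	 *
     	 * E.g. a 24000 kHz reference works out to
     	 * (2 * 1000 * 100000 * 10 / (132 * 24000) + 5) / 10 == 63.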
2772 	 */
2773 	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
2774 
2775 	/*
2776 	 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
2777 	 * 32 bits. That's not a problem since we round the division down
2778 	 * anyway.
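     	 *
     	 * E.g. m1div == 2 and dco_khz == 8100000 give
     	 * 2 * 1000000 * 100 / (8100000 * 3 / 10) == 82.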
2779 	 */
2780 	feedfwgain = (use_ssc || m2div_rem > 0) ?
2781 		m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
2782 
2783 	if (dco_khz >= 9000000) {
2784 		prop_coeff = 5;
2785 		int_coeff = 10;
2786 	} else {
2787 		prop_coeff = 4;
2788 		int_coeff = 8;
2789 	}
2790 
2791 	if (use_ssc) {
2792 		tmp = mul_u32_u32(dco_khz, 47 * 32);
2793 		do_div(tmp, refclk_khz * m1div * 10000);
2794 		ssc_stepsize = tmp;
2795 
2796 		tmp = mul_u32_u32(dco_khz, 1000);
2797 		ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
2798 	} else {
2799 		ssc_stepsize = 0;
2800 		ssc_steplen = 0;
2801 	}
2802 	ssc_steplog = 4;
2803 
2804 	pll_state->mg_pll_div0 = (m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
2805 				  MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
2806 				  MG_PLL_DIV0_FBDIV_INT(m2div_int);
2807 
2808 	pll_state->mg_pll_div1 = MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
2809 				 MG_PLL_DIV1_DITHER_DIV_2 |
2810 				 MG_PLL_DIV1_NDIVRATIO(1) |
2811 				 MG_PLL_DIV1_FBPREDIV(m1div);
2812 
2813 	pll_state->mg_pll_lf = MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
2814 			       MG_PLL_LF_AFCCNTSEL_512 |
2815 			       MG_PLL_LF_GAINCTRL(1) |
2816 			       MG_PLL_LF_INT_COEFF(int_coeff) |
2817 			       MG_PLL_LF_PROP_COEFF(prop_coeff);
2818 
2819 	pll_state->mg_pll_frac_lock = MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
2820 				      MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
2821 				      MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
2822 				      MG_PLL_FRAC_LOCK_DCODITHEREN |
2823 				      MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
2824 	if (use_ssc || m2div_rem > 0)
2825 		pll_state->mg_pll_frac_lock |= MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
2826 
2827 	pll_state->mg_pll_ssc = (use_ssc ? MG_PLL_SSC_EN : 0) |
2828 				MG_PLL_SSC_TYPE(2) |
2829 				MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
2830 				MG_PLL_SSC_STEPNUM(ssc_steplog) |
2831 				MG_PLL_SSC_FLLEN |
2832 				MG_PLL_SSC_STEPSIZE(ssc_stepsize);
2833 
2834 	pll_state->mg_pll_tdc_coldst_bias = MG_PLL_TDC_COLDST_COLDSTART |
2835 					    MG_PLL_TDC_COLDST_IREFINT_EN |
2836 					    MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
2837 					    MG_PLL_TDC_TDCOVCCORR_EN |
2838 					    MG_PLL_TDC_TDCSEL(3);
2839 
2840 	pll_state->mg_pll_bias = MG_PLL_BIAS_BIAS_GB_SEL(3) |
2841 				 MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
2842 				 MG_PLL_BIAS_BIAS_BONUS(10) |
2843 				 MG_PLL_BIAS_BIASCAL_EN |
2844 				 MG_PLL_BIAS_CTRIM(12) |
2845 				 MG_PLL_BIAS_VREF_RDAC(4) |
2846 				 MG_PLL_BIAS_IREFTRIM(iref_trim);
2847 
2848 	if (refclk_khz == 38400) {
2849 		pll_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
2850 		pll_state->mg_pll_bias_mask = 0;
2851 	} else {
2852 		pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
2853 		pll_state->mg_pll_bias_mask = -1U;
2854 	}
2855 
2856 	pll_state->mg_pll_tdc_coldst_bias &= pll_state->mg_pll_tdc_coldst_bias_mask;
2857 	pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
2858 
2859 	return true;
2860 }
2861 
2862 /**
2863  * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
2864  * @crtc_state: state for the CRTC to select the DPLL for
2865  * @port_dpll_id: the active @port_dpll_id to select
2866  *
2867  * Select the given @port_dpll_id instance from the DPLLs reserved for the
2868  * CRTC.
2869  */
2870 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
2871 			      enum icl_port_dpll_id port_dpll_id)
2872 {
2873 	struct icl_port_dpll *port_dpll =
2874 		&crtc_state->icl_port_dplls[port_dpll_id];
2875 
2876 	crtc_state->shared_dpll = port_dpll->pll;
2877 	crtc_state->dpll_hw_state = port_dpll->hw_state;
2878 }
2879 
2880 static void icl_update_active_dpll(struct intel_atomic_state *state,
2881 				   struct intel_crtc *crtc,
2882 				   struct intel_encoder *encoder)
2883 {
2884 	struct intel_crtc_state *crtc_state =
2885 		intel_atomic_get_new_crtc_state(state, crtc);
2886 	struct intel_digital_port *primary_port;
2887 	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
2888 
2889 	primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
2890 		enc_to_mst(&encoder->base)->primary :
2891 		enc_to_dig_port(&encoder->base);
2892 
2893 	if (primary_port &&
2894 	    (primary_port->tc_mode == TC_PORT_DP_ALT ||
2895 	     primary_port->tc_mode == TC_PORT_LEGACY))
2896 		port_dpll_id = ICL_PORT_DPLL_MG_PHY;
2897 
2898 	icl_set_active_port_dpll(crtc_state, port_dpll_id);
2899 }
2900 
2901 static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
2902 				   struct intel_crtc *crtc,
2903 				   struct intel_encoder *encoder)
2904 {
2905 	struct intel_crtc_state *crtc_state =
2906 		intel_atomic_get_new_crtc_state(state, crtc);
2907 	struct icl_port_dpll *port_dpll =
2908 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
2909 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2910 	enum port port = encoder->port;
2911 	bool has_dpll4 = false;
2912 
2913 	if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) {
2914 		DRM_DEBUG_KMS("Could not calculate combo PHY PLL state.\n");
2915 
2916 		return false;
2917 	}
2918 
2919 	if (IS_ELKHARTLAKE(dev_priv) && port != PORT_A)
2920 		has_dpll4 = true;
2921 
2922 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
2923 						&port_dpll->hw_state,
2924 						DPLL_ID_ICL_DPLL0,
2925 						has_dpll4 ? DPLL_ID_EHL_DPLL4
2926 							  : DPLL_ID_ICL_DPLL1);
2927 	if (!port_dpll->pll) {
2928 		DRM_DEBUG_KMS("No combo PHY PLL found for port %c\n",
2929 			      port_name(encoder->port));
2930 		return false;
2931 	}
2932 
2933 	intel_reference_shared_dpll(state, crtc,
2934 				    port_dpll->pll, &port_dpll->hw_state);
2935 
2936 	icl_update_active_dpll(state, crtc, encoder);
2937 
2938 	return true;
2939 }
2940 
2941 static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
2942 				 struct intel_crtc *crtc,
2943 				 struct intel_encoder *encoder)
2944 {
2945 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2946 	struct intel_crtc_state *crtc_state =
2947 		intel_atomic_get_new_crtc_state(state, crtc);
2948 	struct icl_port_dpll *port_dpll;
2949 	enum intel_dpll_id dpll_id;
2950 
2951 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
2952 	if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) {
2953 		DRM_DEBUG_KMS("Could not calculate TBT PLL state.\n");
2954 		return false;
2955 	}
2956 
2957 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
2958 						&port_dpll->hw_state,
2959 						DPLL_ID_ICL_TBTPLL,
2960 						DPLL_ID_ICL_TBTPLL);
2961 	if (!port_dpll->pll) {
2962 		DRM_DEBUG_KMS("No TBT-ALT PLL found\n");
2963 		return false;
2964 	}
2965 	intel_reference_shared_dpll(state, crtc,
2966 				    port_dpll->pll, &port_dpll->hw_state);
2967 
2968 
2969 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
2970 	if (!icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state)) {
2971 		DRM_DEBUG_KMS("Could not calculate MG PHY PLL state.\n");
2972 		goto err_unreference_tbt_pll;
2973 	}
2974 
2975 	dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
2976 							 encoder->port));
2977 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
2978 						&port_dpll->hw_state,
2979 						dpll_id,
2980 						dpll_id);
2981 	if (!port_dpll->pll) {
2982 		DRM_DEBUG_KMS("No MG PHY PLL found\n");
2983 		goto err_unreference_tbt_pll;
2984 	}
2985 	intel_reference_shared_dpll(state, crtc,
2986 				    port_dpll->pll, &port_dpll->hw_state);
2987 
2988 	icl_update_active_dpll(state, crtc, encoder);
2989 
2990 	return true;
2991 
2992 err_unreference_tbt_pll:
2993 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
2994 	intel_unreference_shared_dpll(state, crtc, port_dpll->pll);
2995 
2996 	return false;
2997 }
2998 
2999 static bool icl_get_dplls(struct intel_atomic_state *state,
3000 			  struct intel_crtc *crtc,
3001 			  struct intel_encoder *encoder)
3002 {
3003 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3004 	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3005 
3006 	if (intel_phy_is_combo(dev_priv, phy))
3007 		return icl_get_combo_phy_dpll(state, crtc, encoder);
3008 	else if (intel_phy_is_tc(dev_priv, phy))
3009 		return icl_get_tc_phy_dplls(state, crtc, encoder);
3010 
3011 	MISSING_CASE(phy);
3012 
3013 	return false;
3014 }
3015 
3016 static void icl_put_dplls(struct intel_atomic_state *state,
3017 			  struct intel_crtc *crtc)
3018 {
3019 	const struct intel_crtc_state *old_crtc_state =
3020 		intel_atomic_get_old_crtc_state(state, crtc);
3021 	struct intel_crtc_state *new_crtc_state =
3022 		intel_atomic_get_new_crtc_state(state, crtc);
3023 	enum icl_port_dpll_id id;
3024 
3025 	new_crtc_state->shared_dpll = NULL;
3026 
3027 	for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3028 		const struct icl_port_dpll *old_port_dpll =
3029 			&old_crtc_state->icl_port_dplls[id];
3030 		struct icl_port_dpll *new_port_dpll =
3031 			&new_crtc_state->icl_port_dplls[id];
3032 
3033 		new_port_dpll->pll = NULL;
3034 
3035 		if (!old_port_dpll->pll)
3036 			continue;
3037 
3038 		intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3039 	}
3040 }
3041 
3042 static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
3043 				struct intel_shared_dpll *pll,
3044 				struct intel_dpll_hw_state *hw_state)
3045 {
3046 	const enum intel_dpll_id id = pll->info->id;
3047 	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3048 	intel_wakeref_t wakeref;
3049 	bool ret = false;
3050 	u32 val;
3051 
3052 	wakeref = intel_display_power_get_if_enabled(dev_priv,
3053 						     POWER_DOMAIN_DISPLAY_CORE);
3054 	if (!wakeref)
3055 		return false;
3056 
3057 	val = I915_READ(MG_PLL_ENABLE(tc_port));
3058 	if (!(val & PLL_ENABLE))
3059 		goto out;
3060 
3061 	hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(tc_port));
3062 	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3063 
3064 	hw_state->mg_clktop2_coreclkctl1 =
3065 		I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port));
3066 	hw_state->mg_clktop2_coreclkctl1 &=
3067 		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3068 
3069 	hw_state->mg_clktop2_hsclkctl =
3070 		I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
3071 	hw_state->mg_clktop2_hsclkctl &=
3072 		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3073 		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3074 		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3075 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3076 
3077 	hw_state->mg_pll_div0 = I915_READ(MG_PLL_DIV0(tc_port));
3078 	hw_state->mg_pll_div1 = I915_READ(MG_PLL_DIV1(tc_port));
3079 	hw_state->mg_pll_lf = I915_READ(MG_PLL_LF(tc_port));
3080 	hw_state->mg_pll_frac_lock = I915_READ(MG_PLL_FRAC_LOCK(tc_port));
3081 	hw_state->mg_pll_ssc = I915_READ(MG_PLL_SSC(tc_port));
3082 
3083 	hw_state->mg_pll_bias = I915_READ(MG_PLL_BIAS(tc_port));
3084 	hw_state->mg_pll_tdc_coldst_bias =
3085 		I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
3086 
3087 	if (dev_priv->cdclk.hw.ref == 38400) {
3088 		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
3089 		hw_state->mg_pll_bias_mask = 0;
3090 	} else {
3091 		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
3092 		hw_state->mg_pll_bias_mask = -1U;
3093 	}
3094 
3095 	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
3096 	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
3097 
3098 	ret = true;
3099 out:
3100 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3101 	return ret;
3102 }
3103 
3104 static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
3105 				 struct intel_shared_dpll *pll,
3106 				 struct intel_dpll_hw_state *hw_state,
3107 				 i915_reg_t enable_reg)
3108 {
3109 	const enum intel_dpll_id id = pll->info->id;
3110 	intel_wakeref_t wakeref;
3111 	bool ret = false;
3112 	u32 val;
3113 
3114 	wakeref = intel_display_power_get_if_enabled(dev_priv,
3115 						     POWER_DOMAIN_DISPLAY_CORE);
3116 	if (!wakeref)
3117 		return false;
3118 
3119 	val = I915_READ(enable_reg);
3120 	if (!(val & PLL_ENABLE))
3121 		goto out;
3122 
3123 	if (INTEL_GEN(dev_priv) >= 12) {
3124 		hw_state->cfgcr0 = I915_READ(TGL_DPLL_CFGCR0(id));
3125 		hw_state->cfgcr1 = I915_READ(TGL_DPLL_CFGCR1(id));
3126 	} else {
3127 		if (IS_ELKHARTLAKE(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
3128 			hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(4));
3129 			hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(4));
3130 		} else {
3131 			hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id));
3132 			hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id));
3133 		}
3134 	}
3135 
3136 	ret = true;
3137 out:
3138 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3139 	return ret;
3140 }
3141 
3142 static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
3143 				   struct intel_shared_dpll *pll,
3144 				   struct intel_dpll_hw_state *hw_state)
3145 {
3146 	i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
3147 
3148 	if (IS_ELKHARTLAKE(dev_priv) &&
3149 	    pll->info->id == DPLL_ID_EHL_DPLL4) {
3150 		enable_reg = MG_PLL_ENABLE(0);
3151 	}
3152 
3153 	return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg);
3154 }
3155 
3156 static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
3157 				 struct intel_shared_dpll *pll,
3158 				 struct intel_dpll_hw_state *hw_state)
3159 {
3160 	return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
3161 }
3162 
3163 static void icl_dpll_write(struct drm_i915_private *dev_priv,
3164 			   struct intel_shared_dpll *pll)
3165 {
3166 	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3167 	const enum intel_dpll_id id = pll->info->id;
3168 	i915_reg_t cfgcr0_reg, cfgcr1_reg;
3169 
3170 	if (INTEL_GEN(dev_priv) >= 12) {
3171 		cfgcr0_reg = TGL_DPLL_CFGCR0(id);
3172 		cfgcr1_reg = TGL_DPLL_CFGCR1(id);
3173 	} else {
3174 		if (IS_ELKHARTLAKE(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
3175 			cfgcr0_reg = ICL_DPLL_CFGCR0(4);
3176 			cfgcr1_reg = ICL_DPLL_CFGCR1(4);
3177 		} else {
3178 			cfgcr0_reg = ICL_DPLL_CFGCR0(id);
3179 			cfgcr1_reg = ICL_DPLL_CFGCR1(id);
3180 		}
3181 	}
3182 
3183 	I915_WRITE(cfgcr0_reg, hw_state->cfgcr0);
3184 	I915_WRITE(cfgcr1_reg, hw_state->cfgcr1);
3185 	POSTING_READ(cfgcr1_reg);
3186 }
3187 
3188 static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
3189 			     struct intel_shared_dpll *pll)
3190 {
3191 	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3192 	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3193 	u32 val;
3194 
3195 	/*
3196 	 * Some of the following registers have reserved fields, so program
3197 	 * these with RMW based on a mask. The mask can be fixed or generated
3198 	 * during the calc/readout phase if the mask depends on some other HW
3199 	 * state like refclk, see icl_calc_mg_pll_state().
3200 	 */
3201 	val = I915_READ(MG_REFCLKIN_CTL(tc_port));
3202 	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3203 	val |= hw_state->mg_refclkin_ctl;
3204 	I915_WRITE(MG_REFCLKIN_CTL(tc_port), val);
3205 
3206 	val = I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port));
3207 	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3208 	val |= hw_state->mg_clktop2_coreclkctl1;
3209 	I915_WRITE(MG_CLKTOP2_CORECLKCTL1(tc_port), val);
3210 
3211 	val = I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
3212 	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3213 		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3214 		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3215 		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
3216 	val |= hw_state->mg_clktop2_hsclkctl;
3217 	I915_WRITE(MG_CLKTOP2_HSCLKCTL(tc_port), val);
3218 
3219 	I915_WRITE(MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
3220 	I915_WRITE(MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
3221 	I915_WRITE(MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
3222 	I915_WRITE(MG_PLL_FRAC_LOCK(tc_port), hw_state->mg_pll_frac_lock);
3223 	I915_WRITE(MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
3224 
3225 	val = I915_READ(MG_PLL_BIAS(tc_port));
3226 	val &= ~hw_state->mg_pll_bias_mask;
3227 	val |= hw_state->mg_pll_bias;
3228 	I915_WRITE(MG_PLL_BIAS(tc_port), val);
3229 
3230 	val = I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
3231 	val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
3232 	val |= hw_state->mg_pll_tdc_coldst_bias;
3233 	I915_WRITE(MG_PLL_TDC_COLDST_BIAS(tc_port), val);
3234 
3235 	POSTING_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
3236 }
3237 
3238 static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
3239 				 struct intel_shared_dpll *pll,
3240 				 i915_reg_t enable_reg)
3241 {
3242 	u32 val;
3243 
3244 	val = I915_READ(enable_reg);
3245 	val |= PLL_POWER_ENABLE;
3246 	I915_WRITE(enable_reg, val);
3247 
3248 	/*
3249 	 * The spec says we need to "wait" but it also says it should be
3250 	 * immediate.
3251 	 */
3252 	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1))
3253 		DRM_ERROR("PLL %d Power not enabled\n", pll->info->id);
3254 }
3255 
3256 static void icl_pll_enable(struct drm_i915_private *dev_priv,
3257 			   struct intel_shared_dpll *pll,
3258 			   i915_reg_t enable_reg)
3259 {
3260 	u32 val;
3261 
3262 	val = I915_READ(enable_reg);
3263 	val |= PLL_ENABLE;
3264 	I915_WRITE(enable_reg, val);
3265 
3266 	/* Timeout is actually 600us. */
3267 	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
3268 		DRM_ERROR("PLL %d not locked\n", pll->info->id);
3269 }
3270 
3271 static void combo_pll_enable(struct drm_i915_private *dev_priv,
3272 			     struct intel_shared_dpll *pll)
3273 {
3274 	i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
3275 
3276 	if (IS_ELKHARTLAKE(dev_priv) &&
3277 	    pll->info->id == DPLL_ID_EHL_DPLL4) {
3278 		enable_reg = MG_PLL_ENABLE(0);
3279 
3280 		/*
3281 		 * We need to disable DC states when this DPLL is enabled.
3282 		 * This can be done by taking a reference on DPLL4 power
3283 		 * domain.
3284 		 */
3285 		pll->wakeref = intel_display_power_get(dev_priv,
3286 						       POWER_DOMAIN_DPLL_DC_OFF);
3287 	}
3288 
3289 	icl_pll_power_enable(dev_priv, pll, enable_reg);
3290 
3291 	icl_dpll_write(dev_priv, pll);
3292 
3293 	/*
3294 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3295 	 * paths should already be setting the appropriate voltage, hence we do
3296 	 * nothing here.
3297 	 */
3298 
3299 	icl_pll_enable(dev_priv, pll, enable_reg);
3300 
3301 	/* DVFS post sequence would be here. See the comment above. */
3302 }
3303 
3304 static void tbt_pll_enable(struct drm_i915_private *dev_priv,
3305 			   struct intel_shared_dpll *pll)
3306 {
3307 	icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);
3308 
3309 	icl_dpll_write(dev_priv, pll);
3310 
3311 	/*
3312 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3313 	 * paths should already be setting the appropriate voltage, hence we do
3314 	 * nothing here.
3315 	 */
3316 
3317 	icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);
3318 
3319 	/* DVFS post sequence would be here. See the comment above. */
3320 }
3321 
3322 static void mg_pll_enable(struct drm_i915_private *dev_priv,
3323 			  struct intel_shared_dpll *pll)
3324 {
3325 	i915_reg_t enable_reg =
3326 		MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
3327 
3328 	icl_pll_power_enable(dev_priv, pll, enable_reg);
3329 
3330 	icl_mg_pll_write(dev_priv, pll);
3331 
3332 	/*
3333 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3334 	 * paths should already be setting the appropriate voltage, hence we do
3335 	 * nothing here.
3336 	 */
3337 
3338 	icl_pll_enable(dev_priv, pll, enable_reg);
3339 
3340 	/* DVFS post sequence would be here. See the comment above. */
3341 }
3342 
3343 static void icl_pll_disable(struct drm_i915_private *dev_priv,
3344 			    struct intel_shared_dpll *pll,
3345 			    i915_reg_t enable_reg)
3346 {
3347 	u32 val;
3348 
3349 	/* The first steps are done by intel_ddi_post_disable(). */
3350 
3351 	/*
3352 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3353 	 * paths should already be setting the appropriate voltage, hence we do
3354 	 * nothign here.
3355 	 * nothing here.
3356 
3357 	val = I915_READ(enable_reg);
3358 	val &= ~PLL_ENABLE;
3359 	I915_WRITE(enable_reg, val);
3360 
3361 	/* Timeout is actually 1us. */
3362 	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
3363 		DRM_ERROR("PLL %d locked\n", pll->info->id);
3364 
3365 	/* DVFS post sequence would be here. See the comment above. */
3366 
3367 	val = I915_READ(enable_reg);
3368 	val &= ~PLL_POWER_ENABLE;
3369 	I915_WRITE(enable_reg, val);
3370 
3371 	/*
3372 	 * The spec says we need to "wait" but it also says it should be
3373 	 * immediate.
3374 	 */
3375 	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1))
3376 		DRM_ERROR("PLL %d Power not disabled\n", pll->info->id);
3377 }
3378 
3379 static void combo_pll_disable(struct drm_i915_private *dev_priv,
3380 			      struct intel_shared_dpll *pll)
3381 {
3382 	i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
3383 
3384 	if (IS_ELKHARTLAKE(dev_priv) &&
3385 	    pll->info->id == DPLL_ID_EHL_DPLL4) {
3386 		enable_reg = MG_PLL_ENABLE(0);
3387 		icl_pll_disable(dev_priv, pll, enable_reg);
3388 
3389 		intel_display_power_put(dev_priv, POWER_DOMAIN_DPLL_DC_OFF,
3390 					pll->wakeref);
3391 		return;
3392 	}
3393 
3394 	icl_pll_disable(dev_priv, pll, enable_reg);
3395 }
3396 
3397 static void tbt_pll_disable(struct drm_i915_private *dev_priv,
3398 			    struct intel_shared_dpll *pll)
3399 {
3400 	icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
3401 }
3402 
3403 static void mg_pll_disable(struct drm_i915_private *dev_priv,
3404 			   struct intel_shared_dpll *pll)
3405 {
3406 	i915_reg_t enable_reg =
3407 		MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
3408 
3409 	icl_pll_disable(dev_priv, pll, enable_reg);
3410 }
3411 
3412 static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
3413 			      const struct intel_dpll_hw_state *hw_state)
3414 {
3415 	DRM_DEBUG_KMS("dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, "
3416 		      "mg_refclkin_ctl: 0x%x, mg_clktop2_coreclkctl1: 0x%x, "
3417 		      "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
3418 		      "mg_pll_div1: 0x%x, mg_pll_lf: 0x%x, "
3419 		      "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
3420 		      "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
3421 		      hw_state->cfgcr0, hw_state->cfgcr1,
3422 		      hw_state->mg_refclkin_ctl,
3423 		      hw_state->mg_clktop2_coreclkctl1,
3424 		      hw_state->mg_clktop2_hsclkctl,
3425 		      hw_state->mg_pll_div0,
3426 		      hw_state->mg_pll_div1,
3427 		      hw_state->mg_pll_lf,
3428 		      hw_state->mg_pll_frac_lock,
3429 		      hw_state->mg_pll_ssc,
3430 		      hw_state->mg_pll_bias,
3431 		      hw_state->mg_pll_tdc_coldst_bias);
3432 }
3433 
3434 static const struct intel_shared_dpll_funcs combo_pll_funcs = {
3435 	.enable = combo_pll_enable,
3436 	.disable = combo_pll_disable,
3437 	.get_hw_state = combo_pll_get_hw_state,
3438 };
3439 
3440 static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
3441 	.enable = tbt_pll_enable,
3442 	.disable = tbt_pll_disable,
3443 	.get_hw_state = tbt_pll_get_hw_state,
3444 };
3445 
3446 static const struct intel_shared_dpll_funcs mg_pll_funcs = {
3447 	.enable = mg_pll_enable,
3448 	.disable = mg_pll_disable,
3449 	.get_hw_state = mg_pll_get_hw_state,
3450 };
3451 
3452 static const struct dpll_info icl_plls[] = {
3453 	{ "DPLL 0",   &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
3454 	{ "DPLL 1",   &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
3455 	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
3456 	{ "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
3457 	{ "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
3458 	{ "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
3459 	{ "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
3460 	{ },
3461 };
3462 
3463 static const struct intel_dpll_mgr icl_pll_mgr = {
3464 	.dpll_info = icl_plls,
3465 	.get_dplls = icl_get_dplls,
3466 	.put_dplls = icl_put_dplls,
3467 	.update_active_dpll = icl_update_active_dpll,
3468 	.dump_hw_state = icl_dump_hw_state,
3469 };
3470 
3471 static const struct dpll_info ehl_plls[] = {
3472 	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
3473 	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
3474 	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
3475 	{ },
3476 };
3477 
3478 static const struct intel_dpll_mgr ehl_pll_mgr = {
3479 	.dpll_info = ehl_plls,
3480 	.get_dplls = icl_get_dplls,
3481 	.put_dplls = icl_put_dplls,
3482 	.dump_hw_state = icl_dump_hw_state,
3483 };
3484 
3485 static const struct dpll_info tgl_plls[] = {
3486 	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
3487 	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
3488 	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
3489 	/* TODO: Add typeC plls */
3490 	{ },
3491 };
3492 
3493 static const struct intel_dpll_mgr tgl_pll_mgr = {
3494 	.dpll_info = tgl_plls,
3495 	.get_dplls = icl_get_dplls,
3496 	.put_dplls = icl_put_dplls,
3497 	.dump_hw_state = icl_dump_hw_state,
3498 };
3499 
3500 /**
3501  * intel_shared_dpll_init - Initialize shared DPLLs
3502  * @dev: drm device
3503  *
3504  * Initialize shared DPLLs for @dev.
3505  */
3506 void intel_shared_dpll_init(struct drm_device *dev)
3507 {
3508 	struct drm_i915_private *dev_priv = to_i915(dev);
3509 	const struct intel_dpll_mgr *dpll_mgr = NULL;
3510 	const struct dpll_info *dpll_info;
3511 	int i;
3512 
3513 	if (INTEL_GEN(dev_priv) >= 12)
3514 		dpll_mgr = &tgl_pll_mgr;
3515 	else if (IS_ELKHARTLAKE(dev_priv))
3516 		dpll_mgr = &ehl_pll_mgr;
3517 	else if (INTEL_GEN(dev_priv) >= 11)
3518 		dpll_mgr = &icl_pll_mgr;
3519 	else if (IS_CANNONLAKE(dev_priv))
3520 		dpll_mgr = &cnl_pll_mgr;
3521 	else if (IS_GEN9_BC(dev_priv))
3522 		dpll_mgr = &skl_pll_mgr;
3523 	else if (IS_GEN9_LP(dev_priv))
3524 		dpll_mgr = &bxt_pll_mgr;
3525 	else if (HAS_DDI(dev_priv))
3526 		dpll_mgr = &hsw_pll_mgr;
3527 	else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
3528 		dpll_mgr = &pch_pll_mgr;
3529 
3530 	if (!dpll_mgr) {
3531 		dev_priv->num_shared_dpll = 0;
3532 		return;
3533 	}
3534 
3535 	dpll_info = dpll_mgr->dpll_info;
3536 
3537 	for (i = 0; dpll_info[i].name; i++) {
3538 		WARN_ON(i != dpll_info[i].id);
3539 		dev_priv->shared_dplls[i].info = &dpll_info[i];
3540 	}
3541 
3542 	dev_priv->dpll_mgr = dpll_mgr;
3543 	dev_priv->num_shared_dpll = i;
3544 	mutex_init(&dev_priv->dpll_lock);
3545 
3546 	BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
3547 }
3548 
3549 /**
3550  * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
3551  * @state: atomic state
3552  * @crtc: CRTC to reserve DPLLs for
3553  * @encoder: encoder
3554  *
3555  * This function reserves all required DPLLs for the given CRTC and encoder
3556  * combination in the current atomic commit @state and the new @crtc atomic
3557  * state.
3558  *
3559  * The new configuration in the atomic commit @state is made effective by
3560  * calling intel_shared_dpll_swap_state().
3561  *
3562  * The reserved DPLLs should be released by calling
3563  * intel_release_shared_dplls().
3564  *
3565  * Returns:
3566  * True if all required DPLLs were successfully reserved.
3567  */
3568 bool intel_reserve_shared_dplls(struct intel_atomic_state *state,
3569 				struct intel_crtc *crtc,
3570 				struct intel_encoder *encoder)
3571 {
3572 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3573 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
3574 
3575 	if (WARN_ON(!dpll_mgr))
3576 		return false;
3577 
3578 	return dpll_mgr->get_dplls(state, crtc, encoder);
3579 }
3580 
3581 /**
3582  * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
3583  * @state: atomic state
3584  * @crtc: crtc from which the DPLLs are to be released
3585  *
3586  * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
3587  * from the current atomic commit @state and the old @crtc atomic state.
3588  *
3589  * The new configuration in the atomic commit @state is made effective by
3590  * calling intel_shared_dpll_swap_state().
3591  */
3592 void intel_release_shared_dplls(struct intel_atomic_state *state,
3593 				struct intel_crtc *crtc)
3594 {
3595 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3596 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
3597 
3598 	/*
3599 	 * FIXME: this function is called for every platform having a
3600 	 * compute_clock hook, even on platforms that don't yet support the
3601 	 * shared DPLL framework, where intel_reserve_shared_dplls() is never
3602 	 * called.
3603 	 */
3604 	if (!dpll_mgr)
3605 		return;
3606 
3607 	dpll_mgr->put_dplls(state, crtc);
3608 }
3609 
3610 /**
3611  * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
3612  * @state: atomic state
3613  * @crtc: the CRTC for which to update the active DPLL
3614  * @encoder: encoder determining the type of port DPLL
3615  *
3616  * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
3617  * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
3618  * DPLL selected will be based on the current mode of the encoder's port.
3619  */
3620 void intel_update_active_dpll(struct intel_atomic_state *state,
3621 			      struct intel_crtc *crtc,
3622 			      struct intel_encoder *encoder)
3623 {
3624 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3625 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
3626 
3627 	if (WARN_ON(!dpll_mgr))
3628 		return;
3629 
3630 	dpll_mgr->update_active_dpll(state, crtc, encoder);
3631 }
3632 
3633 /**
3634  * intel_dpll_dump_hw_state - write hw_state to dmesg
3635  * @dev_priv: i915 drm device
3636  * @hw_state: hw state to be written to the log
3637  *
3638  * Write the relevant values in @hw_state to dmesg using DRM_DEBUG_KMS.
3639  */
3640 void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
3641 			      const struct intel_dpll_hw_state *hw_state)
3642 {
3643 	if (dev_priv->dpll_mgr) {
3644 		dev_priv->dpll_mgr->dump_hw_state(dev_priv, hw_state);
3645 	} else {
3646 		/* fallback for platforms that don't use the shared dpll
3647 		 * infrastructure
3648 		 */
3649 		DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
3650 			      "fp0: 0x%x, fp1: 0x%x\n",
3651 			      hw_state->dpll,
3652 			      hw_state->dpll_md,
3653 			      hw_state->fp0,
3654 			      hw_state->fp1);
3655 	}
3656 }
3657