xref: /linux/drivers/gpu/drm/i915/display/intel_dpll_mgr.c (revision bf62221e9d0e1e4ba50ab2b331a0008c15de97be)
1 /*
2  * Copyright © 2006-2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include "intel_de.h"
25 #include "intel_display_types.h"
26 #include "intel_dpio_phy.h"
27 #include "intel_dpll.h"
28 #include "intel_dpll_mgr.h"
29 
30 /**
31  * DOC: Display PLLs
32  *
33  * Display PLLs used for driving outputs vary by platform. While some have
34  * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
35  * from a pool. In the latter scenario, it is possible that multiple pipes
36  * share a PLL if their configurations match.
37  *
38  * This file provides an abstraction over display PLLs. The function
39  * intel_shared_dpll_init() initializes the PLLs for the given platform.  The
40  * users of a PLL are tracked and that tracking is integrated with the atomic
41  * modeset interface. During an atomic operation, required PLLs can be reserved
42  * for a given CRTC and encoder configuration by calling
43  * intel_reserve_shared_dplls() and previously reserved PLLs can be released
44  * with intel_release_shared_dplls().
45  * Changes to the users are first staged in the atomic state, and then made
46  * effective by calling intel_shared_dpll_swap_state() during the atomic
47  * commit phase.
48  */
49 
/*
 * Per-platform bundle of the available PLL descriptors plus the hooks that
 * implement PLL reservation/release and debug dumping for that platform.
 */
struct intel_dpll_mgr {
	/* table of PLLs available on this platform */
	const struct dpll_info *dpll_info;

	/* reserve the PLLs needed for @crtc/@encoder in the atomic @state */
	bool (*get_dplls)(struct intel_atomic_state *state,
			  struct intel_crtc *crtc,
			  struct intel_encoder *encoder);
	/* release the PLLs previously reserved for @crtc */
	void (*put_dplls)(struct intel_atomic_state *state,
			  struct intel_crtc *crtc);
	/* optional: switch @crtc's active PLL (used with port PLL sharing) */
	void (*update_active_dpll)(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_encoder *encoder);
	/* optional: refresh cached reference clock frequencies */
	void (*update_ref_clks)(struct drm_i915_private *i915);
	/* log the platform-relevant fields of @hw_state */
	void (*dump_hw_state)(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state);
};
65 
66 static void
67 intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
68 				  struct intel_shared_dpll_state *shared_dpll)
69 {
70 	enum intel_dpll_id i;
71 
72 	/* Copy shared dpll state */
73 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
74 		struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
75 
76 		shared_dpll[i] = pll->state;
77 	}
78 }
79 
80 static struct intel_shared_dpll_state *
81 intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
82 {
83 	struct intel_atomic_state *state = to_intel_atomic_state(s);
84 
85 	drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
86 
87 	if (!state->dpll_set) {
88 		state->dpll_set = true;
89 
90 		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
91 						  state->shared_dpll);
92 	}
93 
94 	return state->shared_dpll;
95 }
96 
97 /**
98  * intel_get_shared_dpll_by_id - get a DPLL given its id
99  * @dev_priv: i915 device instance
100  * @id: pll id
101  *
102  * Returns:
103  * A pointer to the DPLL with @id
104  */
105 struct intel_shared_dpll *
106 intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
107 			    enum intel_dpll_id id)
108 {
109 	return &dev_priv->dpll.shared_dplls[id];
110 }
111 
112 /**
113  * intel_get_shared_dpll_id - get the id of a DPLL
114  * @dev_priv: i915 device instance
115  * @pll: the DPLL
116  *
117  * Returns:
118  * The id of @pll
119  */
120 enum intel_dpll_id
121 intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
122 			 struct intel_shared_dpll *pll)
123 {
124 	long pll_idx = pll - dev_priv->dpll.shared_dplls;
125 
126 	if (drm_WARN_ON(&dev_priv->drm,
127 			pll_idx < 0 ||
128 			pll_idx >= dev_priv->dpll.num_shared_dpll))
129 		return -1;
130 
131 	return pll_idx;
132 }
133 
/* For ILK+ */
/*
 * Warn (once) if @pll's hardware enabled state does not match the expected
 * @state. Used as assert_shared_dpll_enabled()/_disabled() via macros.
 */
void assert_shared_dpll(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll,
			bool state)
{
	bool cur_state;
	struct intel_dpll_hw_state hw_state;

	/* A NULL pll here is a driver bug; complain but don't crash. */
	if (drm_WARN(&dev_priv->drm, !pll,
		     "asserting DPLL %s with no DPLL\n", onoff(state)))
		return;

	/* Read the actual enabled state back from the hardware. */
	cur_state = intel_dpll_get_hw_state(dev_priv, pll, &hw_state);
	I915_STATE_WARN(cur_state != state,
	     "%s assertion failure (expected %s, current %s)\n",
			pll->info->name, onoff(state), onoff(cur_state));
}
151 
152 static i915_reg_t
153 intel_combo_pll_enable_reg(struct drm_i915_private *i915,
154 			   struct intel_shared_dpll *pll)
155 {
156 	if (IS_DG1(i915))
157 		return DG1_DPLL_ENABLE(pll->info->id);
158 	else if (IS_JSL_EHL(i915) && (pll->info->id == DPLL_ID_EHL_DPLL4))
159 		return MG_PLL_ENABLE(0);
160 
161 	return CNL_DPLL_ENABLE(pll->info->id);
162 }
163 
/**
 * intel_prepare_shared_dpll - call a dpll's prepare hook
 * @crtc_state: CRTC, and its state, which has a shared dpll
 *
 * This calls the PLL's prepare hook if it has one and if the PLL is not
 * already enabled. The prepare hook is platform specific.
 */
void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll = crtc_state->shared_dpll;

	if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
		return;

	mutex_lock(&dev_priv->dpll.lock);
	/* A PLL being prepared must have been reserved by at least one pipe. */
	drm_WARN_ON(&dev_priv->drm, !pll->state.pipe_mask);
	/* Only prepare once, before the first pipe enables the PLL. */
	if (!pll->active_mask) {
		drm_dbg(&dev_priv->drm, "setting up %s\n", pll->info->name);
		drm_WARN_ON(&dev_priv->drm, pll->on);
		assert_shared_dpll_disabled(dev_priv, pll);

		pll->info->funcs->prepare(dev_priv, pll);
	}
	mutex_unlock(&dev_priv->dpll.lock);
}
191 
/**
 * intel_enable_shared_dpll - enable a CRTC's shared DPLL
 * @crtc_state: CRTC, and its state, which has a shared DPLL
 *
 * Enable the shared DPLL used by @crtc. If another pipe already has the PLL
 * running, only the bookkeeping is updated; the hardware enable sequence is
 * executed once, by the first user.
 */
void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
	unsigned int pipe_mask = BIT(crtc->pipe);
	unsigned int old_mask;

	if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
		return;

	mutex_lock(&dev_priv->dpll.lock);
	old_mask = pll->active_mask;

	/* The pipe must have reserved the PLL and not yet enabled it. */
	if (drm_WARN_ON(&dev_priv->drm, !(pll->state.pipe_mask & pipe_mask)) ||
	    drm_WARN_ON(&dev_priv->drm, pll->active_mask & pipe_mask))
		goto out;

	pll->active_mask |= pipe_mask;

	drm_dbg_kms(&dev_priv->drm,
		    "enable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
		    pll->info->name, pll->active_mask, pll->on,
		    crtc->base.base.id, crtc->base.name);

	/* Already running for another pipe: just sanity-check and share it. */
	if (old_mask) {
		drm_WARN_ON(&dev_priv->drm, !pll->on);
		assert_shared_dpll_enabled(dev_priv, pll);
		goto out;
	}
	drm_WARN_ON(&dev_priv->drm, pll->on);

	/* First user: run the platform-specific hardware enable sequence. */
	drm_dbg_kms(&dev_priv->drm, "enabling %s\n", pll->info->name);
	pll->info->funcs->enable(dev_priv, pll);
	pll->on = true;

out:
	mutex_unlock(&dev_priv->dpll.lock);
}
237 
/**
 * intel_disable_shared_dpll - disable a CRTC's shared DPLL
 * @crtc_state: CRTC, and its state, which has a shared DPLL
 *
 * Disable the shared DPLL used by @crtc. The hardware is only shut down once
 * the last pipe using the PLL drops its reference.
 */
void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
	unsigned int pipe_mask = BIT(crtc->pipe);

	/* PCH only available on ILK+ */
	if (DISPLAY_VER(dev_priv) < 5)
		return;

	if (pll == NULL)
		return;

	mutex_lock(&dev_priv->dpll.lock);
	/* Disabling a PLL this pipe never enabled is a driver bug. */
	if (drm_WARN(&dev_priv->drm, !(pll->active_mask & pipe_mask),
		     "%s not used by [CRTC:%d:%s]\n", pll->info->name,
		     crtc->base.base.id, crtc->base.name))
		goto out;

	drm_dbg_kms(&dev_priv->drm,
		    "disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
		    pll->info->name, pll->active_mask, pll->on,
		    crtc->base.base.id, crtc->base.name);

	assert_shared_dpll_enabled(dev_priv, pll);
	drm_WARN_ON(&dev_priv->drm, !pll->on);

	/* Drop this pipe's reference; keep the PLL running for other users. */
	pll->active_mask &= ~pipe_mask;
	if (pll->active_mask)
		goto out;

	/* Last user gone: run the platform-specific disable sequence. */
	drm_dbg_kms(&dev_priv->drm, "disabling %s\n", pll->info->name);
	pll->info->funcs->disable(dev_priv, pll);
	pll->on = false;

out:
	mutex_unlock(&dev_priv->dpll.lock);
}
283 
284 static struct intel_shared_dpll *
285 intel_find_shared_dpll(struct intel_atomic_state *state,
286 		       const struct intel_crtc *crtc,
287 		       const struct intel_dpll_hw_state *pll_state,
288 		       unsigned long dpll_mask)
289 {
290 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
291 	struct intel_shared_dpll *pll, *unused_pll = NULL;
292 	struct intel_shared_dpll_state *shared_dpll;
293 	enum intel_dpll_id i;
294 
295 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
296 
297 	drm_WARN_ON(&dev_priv->drm, dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));
298 
299 	for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) {
300 		pll = &dev_priv->dpll.shared_dplls[i];
301 
302 		/* Only want to check enabled timings first */
303 		if (shared_dpll[i].pipe_mask == 0) {
304 			if (!unused_pll)
305 				unused_pll = pll;
306 			continue;
307 		}
308 
309 		if (memcmp(pll_state,
310 			   &shared_dpll[i].hw_state,
311 			   sizeof(*pll_state)) == 0) {
312 			drm_dbg_kms(&dev_priv->drm,
313 				    "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
314 				    crtc->base.base.id, crtc->base.name,
315 				    pll->info->name,
316 				    shared_dpll[i].pipe_mask,
317 				    pll->active_mask);
318 			return pll;
319 		}
320 	}
321 
322 	/* Ok no matching timings, maybe there's a free one? */
323 	if (unused_pll) {
324 		drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] allocated %s\n",
325 			    crtc->base.base.id, crtc->base.name,
326 			    unused_pll->info->name);
327 		return unused_pll;
328 	}
329 
330 	return NULL;
331 }
332 
333 static void
334 intel_reference_shared_dpll(struct intel_atomic_state *state,
335 			    const struct intel_crtc *crtc,
336 			    const struct intel_shared_dpll *pll,
337 			    const struct intel_dpll_hw_state *pll_state)
338 {
339 	struct drm_i915_private *i915 = to_i915(state->base.dev);
340 	struct intel_shared_dpll_state *shared_dpll;
341 	const enum intel_dpll_id id = pll->info->id;
342 
343 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
344 
345 	if (shared_dpll[id].pipe_mask == 0)
346 		shared_dpll[id].hw_state = *pll_state;
347 
348 	drm_dbg(&i915->drm, "using %s for pipe %c\n", pll->info->name,
349 		pipe_name(crtc->pipe));
350 
351 	shared_dpll[id].pipe_mask |= BIT(crtc->pipe);
352 }
353 
354 static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
355 					  const struct intel_crtc *crtc,
356 					  const struct intel_shared_dpll *pll)
357 {
358 	struct intel_shared_dpll_state *shared_dpll;
359 
360 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
361 	shared_dpll[pll->info->id].pipe_mask &= ~BIT(crtc->pipe);
362 }
363 
364 static void intel_put_dpll(struct intel_atomic_state *state,
365 			   struct intel_crtc *crtc)
366 {
367 	const struct intel_crtc_state *old_crtc_state =
368 		intel_atomic_get_old_crtc_state(state, crtc);
369 	struct intel_crtc_state *new_crtc_state =
370 		intel_atomic_get_new_crtc_state(state, crtc);
371 
372 	new_crtc_state->shared_dpll = NULL;
373 
374 	if (!old_crtc_state->shared_dpll)
375 		return;
376 
377 	intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
378 }
379 
380 /**
381  * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
382  * @state: atomic state
383  *
384  * This is the dpll version of drm_atomic_helper_swap_state() since the
385  * helper does not handle driver-specific global state.
386  *
387  * For consistency with atomic helpers this function does a complete swap,
388  * i.e. it also puts the current state into @state, even though there is no
389  * need for that at this moment.
390  */
391 void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
392 {
393 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
394 	struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
395 	enum intel_dpll_id i;
396 
397 	if (!state->dpll_set)
398 		return;
399 
400 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
401 		struct intel_shared_dpll *pll =
402 			&dev_priv->dpll.shared_dplls[i];
403 
404 		swap(pll->state, shared_dpll[i]);
405 	}
406 }
407 
/*
 * Read back the PCH DPLL and divider registers into @hw_state.
 * Returns true iff the PLL's VCO is currently enabled.
 */
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;

	/* Report "disabled" if the display core power domain is off. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, PCH_DPLL(id));
	hw_state->dpll = val;
	hw_state->fp0 = intel_de_read(dev_priv, PCH_FP0(id));
	hw_state->fp1 = intel_de_read(dev_priv, PCH_FP1(id));

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & DPLL_VCO_ENABLE;
}
430 
431 static void ibx_pch_dpll_prepare(struct drm_i915_private *dev_priv,
432 				 struct intel_shared_dpll *pll)
433 {
434 	const enum intel_dpll_id id = pll->info->id;
435 
436 	intel_de_write(dev_priv, PCH_FP0(id), pll->state.hw_state.fp0);
437 	intel_de_write(dev_priv, PCH_FP1(id), pll->state.hw_state.fp1);
438 }
439 
440 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
441 {
442 	u32 val;
443 	bool enabled;
444 
445 	I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
446 
447 	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
448 	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
449 			    DREF_SUPERSPREAD_SOURCE_MASK));
450 	I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
451 }
452 
/*
 * Hardware enable sequence for a PCH DPLL: program the control register,
 * wait for lock, then rewrite it so the pixel multiplier takes effect.
 */
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	/* PCH refclock must be enabled first */
	ibx_assert_pch_refclk_enabled(dev_priv);

	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(200);
}
476 
/* Hardware disable sequence: clear the whole DPLL control register. */
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(dev_priv, PCH_DPLL(id), 0);
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	/* Give the DPLL output time to settle after shutting it off. */
	udelay(200);
}
486 
487 static bool ibx_get_dpll(struct intel_atomic_state *state,
488 			 struct intel_crtc *crtc,
489 			 struct intel_encoder *encoder)
490 {
491 	struct intel_crtc_state *crtc_state =
492 		intel_atomic_get_new_crtc_state(state, crtc);
493 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
494 	struct intel_shared_dpll *pll;
495 	enum intel_dpll_id i;
496 
497 	if (HAS_PCH_IBX(dev_priv)) {
498 		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
499 		i = (enum intel_dpll_id) crtc->pipe;
500 		pll = &dev_priv->dpll.shared_dplls[i];
501 
502 		drm_dbg_kms(&dev_priv->drm,
503 			    "[CRTC:%d:%s] using pre-allocated %s\n",
504 			    crtc->base.base.id, crtc->base.name,
505 			    pll->info->name);
506 	} else {
507 		pll = intel_find_shared_dpll(state, crtc,
508 					     &crtc_state->dpll_hw_state,
509 					     BIT(DPLL_ID_PCH_PLL_B) |
510 					     BIT(DPLL_ID_PCH_PLL_A));
511 	}
512 
513 	if (!pll)
514 		return false;
515 
516 	/* reference the pll */
517 	intel_reference_shared_dpll(state, crtc,
518 				    pll, &crtc_state->dpll_hw_state);
519 
520 	crtc_state->shared_dpll = pll;
521 
522 	return true;
523 }
524 
525 static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
526 			      const struct intel_dpll_hw_state *hw_state)
527 {
528 	drm_dbg_kms(&dev_priv->drm,
529 		    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
530 		    "fp0: 0x%x, fp1: 0x%x\n",
531 		    hw_state->dpll,
532 		    hw_state->dpll_md,
533 		    hw_state->fp0,
534 		    hw_state->fp1);
535 }
536 
/* Hardware hooks for the IBX/CPT PCH DPLLs */
static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
	.prepare = ibx_pch_dpll_prepare,
	.enable = ibx_pch_dpll_enable,
	.disable = ibx_pch_dpll_disable,
	.get_hw_state = ibx_pch_dpll_get_hw_state,
};

/* The two PCH DPLLs; the empty entry terminates the table */
static const struct dpll_info pch_plls[] = {
	{ "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
	{ "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
	{ },
};

/* DPLL manager for PCH-based (ILK-era) platforms */
static const struct intel_dpll_mgr pch_pll_mgr = {
	.dpll_info = pch_plls,
	.get_dplls = ibx_get_dpll,
	.put_dplls = intel_put_dpll,
	.dump_hw_state = ibx_dump_hw_state,
};
556 
557 static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
558 			       struct intel_shared_dpll *pll)
559 {
560 	const enum intel_dpll_id id = pll->info->id;
561 
562 	intel_de_write(dev_priv, WRPLL_CTL(id), pll->state.hw_state.wrpll);
563 	intel_de_posting_read(dev_priv, WRPLL_CTL(id));
564 	udelay(20);
565 }
566 
/* Program and enable the (single-instance) SPLL, then let it settle. */
static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	intel_de_write(dev_priv, SPLL_CTL, pll->state.hw_state.spll);
	intel_de_posting_read(dev_priv, SPLL_CTL);
	udelay(20);
}
574 
/* Clear the WRPLL enable bit, keeping the rest of the control register. */
static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	val = intel_de_read(dev_priv, WRPLL_CTL(id));
	intel_de_write(dev_priv, WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
	intel_de_posting_read(dev_priv, WRPLL_CTL(id));

	/*
	 * Try to set up the PCH reference clock once all DPLLs
	 * that depend on it have been shut down.
	 */
	if (dev_priv->pch_ssc_use & BIT(id))
		intel_init_pch_refclk(dev_priv);
}
592 
593 static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
594 				 struct intel_shared_dpll *pll)
595 {
596 	enum intel_dpll_id id = pll->info->id;
597 	u32 val;
598 
599 	val = intel_de_read(dev_priv, SPLL_CTL);
600 	intel_de_write(dev_priv, SPLL_CTL, val & ~SPLL_PLL_ENABLE);
601 	intel_de_posting_read(dev_priv, SPLL_CTL);
602 
603 	/*
604 	 * Try to set up the PCH reference clock once all DPLLs
605 	 * that depend on it have been shut down.
606 	 */
607 	if (dev_priv->pch_ssc_use & BIT(id))
608 		intel_init_pch_refclk(dev_priv);
609 }
610 
/*
 * Read back the WRPLL control register into @hw_state.
 * Returns true iff the WRPLL is currently enabled.
 */
static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;

	/* Report "disabled" if the display core power domain is off. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, WRPLL_CTL(id));
	hw_state->wrpll = val;

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & WRPLL_PLL_ENABLE;
}
631 
/*
 * Read back the SPLL control register into @hw_state.
 * Returns true iff the SPLL is currently enabled.
 */
static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
	intel_wakeref_t wakeref;
	u32 val;

	/* Report "disabled" if the display core power domain is off. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, SPLL_CTL);
	hw_state->spll = val;

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & SPLL_PLL_ENABLE;
}
651 
/* LCPLL reference frequency, in MHz */
#define LC_FREQ 2700
/* LC_FREQ scaled to the freq2k units used by hsw_wrpll_update_rnp() */
#define LC_FREQ_2K U64_C(LC_FREQ * 2000)

/* Post-divider search range: even values from 2 to 64 */
#define P_MIN 2
#define P_MAX 64
#define P_INC 2

/* Constraints for PLL good behavior */
#define REF_MIN 48
#define REF_MAX 400
#define VCO_MIN 2400
#define VCO_MAX 4800

/*
 * Candidate WRPLL divider triple; n2 = 2*N and r2 = 2*R
 * (see hsw_ddi_calculate_wrpll()).
 */
struct hsw_wrpll_rnp {
	unsigned p, n2, r2;
};
668 
/*
 * Return the PPM error budget allowed when synthesizing @clock (in Hz)
 * with the WRPLL. Well-known pixel clocks that must be hit exactly get a
 * budget of 0; a handful of others get relaxed budgets; everything else
 * falls back to 1000 PPM.
 */
static unsigned hsw_wrpll_get_budget_for_freq(int clock)
{
	static const int budget_0_clocks[] = {
		25175000, 25200000, 27000000, 27027000, 37762500, 37800000,
		40500000, 40541000, 54000000, 54054000, 59341000, 59400000,
		72000000, 74176000, 74250000, 81000000, 81081000, 89012000,
		89100000, 108000000, 108108000, 111264000, 111375000,
		148352000, 148500000, 162000000, 162162000, 222525000,
		222750000, 296703000, 297000000,
	};
	static const int budget_1500_clocks[] = {
		233500000, 245250000, 247750000, 253250000, 298000000,
	};
	static const int budget_2000_clocks[] = {
		169128000, 169500000, 179500000, 202000000,
	};
	static const int budget_4000_clocks[] = {
		256250000, 262500000, 270000000, 272500000, 273750000,
		280750000, 281250000, 286000000, 291750000,
	};
	static const int budget_5000_clocks[] = {
		267250000, 268500000,
	};
	static const struct {
		const int *clocks;
		unsigned int num_clocks;
		unsigned int budget;
	} groups[] = {
		{ budget_0_clocks,
		  sizeof(budget_0_clocks) / sizeof(budget_0_clocks[0]), 0 },
		{ budget_1500_clocks,
		  sizeof(budget_1500_clocks) / sizeof(budget_1500_clocks[0]), 1500 },
		{ budget_2000_clocks,
		  sizeof(budget_2000_clocks) / sizeof(budget_2000_clocks[0]), 2000 },
		{ budget_4000_clocks,
		  sizeof(budget_4000_clocks) / sizeof(budget_4000_clocks[0]), 4000 },
		{ budget_5000_clocks,
		  sizeof(budget_5000_clocks) / sizeof(budget_5000_clocks[0]), 5000 },
	};
	unsigned int i, j;

	for (i = 0; i < sizeof(groups) / sizeof(groups[0]); i++) {
		for (j = 0; j < groups[i].num_clocks; j++) {
			if (groups[i].clocks[j] == clock)
				return groups[i].budget;
		}
	}

	/* Unlisted frequencies get the default budget. */
	return 1000;
}
742 
/*
 * Consider the candidate divider triple (r2, n2, p) for the target @freq2k
 * and update @best if the candidate is preferable, per the budget rules
 * described below.
 */
static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
				 unsigned int r2, unsigned int n2,
				 unsigned int p,
				 struct hsw_wrpll_rnp *best)
{
	u64 a, b, c, d, diff, diff_best;

	/* No best (r,n,p) yet */
	if (best->p == 0) {
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
		return;
	}

	/*
	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
	 * freq2k.
	 *
	 * delta = 1e6 *
	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
	 *	   freq2k;
	 *
	 * and we would like delta <= budget.
	 *
	 * If the discrepancy is above the PPM-based budget, always prefer to
	 * improve upon the previous solution.  However, if you're within the
	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
	 */
	/*
	 * a/b are the budgets and c/d the errors, both cross-multiplied onto
	 * a common scale: "a < c" means the candidate misses its budget,
	 * "b < d" means the current best misses its budget.
	 */
	a = freq2k * budget * p * r2;
	b = freq2k * budget * best->p * best->r2;
	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
	diff_best = abs_diff(freq2k * best->p * best->r2,
			     LC_FREQ_2K * best->n2);
	c = 1000000 * diff;
	d = 1000000 * diff_best;

	if (a < c && b < d) {
		/* If both are above the budget, pick the closer */
		if (best->p * best->r2 * diff < p * r2 * diff_best) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	} else if (a >= c && b < d) {
		/* If A is below the threshold but B is above it?  Update. */
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
	} else if (a >= c && b >= d) {
		/* Both are below the limit, so pick the higher n2/(r2*r2) */
		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	}
	/* Otherwise a < c && b >= d, do nothing */
}
802 
/*
 * Exhaustively search the (r2, n2, p) divider space for the best WRPLL
 * configuration for @clock, using hsw_wrpll_update_rnp() as the selection
 * criterion. Note r2 = 2*R and n2 = 2*N (see the derivations below).
 */
static void
hsw_ddi_calculate_wrpll(int clock /* in Hz */,
			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
{
	u64 freq2k;
	unsigned p, n2, r2;
	struct hsw_wrpll_rnp best = { 0, 0, 0 };
	unsigned budget;

	freq2k = clock / 100;

	budget = hsw_wrpll_get_budget_for_freq(clock);

	/* Special case handling for 540 pixel clock: bypass WR PLL entirely
	 * and directly pass the LC PLL to it. */
	if (freq2k == 5400000) {
		*n2_out = 2;
		*p_out = 1;
		*r2_out = 2;
		return;
	}

	/*
	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
	 * the WR PLL.
	 *
	 * We want R so that REF_MIN <= Ref <= REF_MAX.
	 * Injecting R2 = 2 * R gives:
	 *   REF_MAX * r2 > LC_FREQ * 2 and
	 *   REF_MIN * r2 < LC_FREQ * 2
	 *
	 * Which means the desired boundaries for r2 are:
	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
	 *
	 */
	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
	     r2 <= LC_FREQ * 2 / REF_MIN;
	     r2++) {

		/*
		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
		 *
		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
		 *   VCO_MAX * r2 > n2 * LC_FREQ and
		 *   VCO_MIN * r2 < n2 * LC_FREQ)
		 *
		 * Which means the desired boundaries for n2 are:
		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
		 */
		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
		     n2 <= VCO_MAX * r2 / LC_FREQ;
		     n2++) {

			/* Try every even post divider with this (r2, n2). */
			for (p = P_MIN; p <= P_MAX; p += P_INC)
				hsw_wrpll_update_rnp(freq2k, budget,
						     r2, n2, p, &best);
		}
	}

	*n2_out = best.n2;
	*p_out = best.p;
	*r2_out = best.r2;
}
867 
868 static struct intel_shared_dpll *
869 hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
870 		       struct intel_crtc *crtc)
871 {
872 	struct intel_crtc_state *crtc_state =
873 		intel_atomic_get_new_crtc_state(state, crtc);
874 	struct intel_shared_dpll *pll;
875 	u32 val;
876 	unsigned int p, n2, r2;
877 
878 	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
879 
880 	val = WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
881 	      WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
882 	      WRPLL_DIVIDER_POST(p);
883 
884 	crtc_state->dpll_hw_state.wrpll = val;
885 
886 	pll = intel_find_shared_dpll(state, crtc,
887 				     &crtc_state->dpll_hw_state,
888 				     BIT(DPLL_ID_WRPLL2) |
889 				     BIT(DPLL_ID_WRPLL1));
890 
891 	if (!pll)
892 		return NULL;
893 
894 	return pll;
895 }
896 
/*
 * Compute the output frequency (in kHz) produced by the WRPLL configuration
 * in @pll_state, based on the selected reference and the divider fields.
 */
static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
{
	int refclk;
	int n, p, r;
	u32 wrpll = pll_state->wrpll;

	switch (wrpll & WRPLL_REF_MASK) {
	case WRPLL_REF_SPECIAL_HSW:
		/* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
		if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) {
			refclk = dev_priv->dpll.ref_clks.nssc;
			break;
		}
		fallthrough;
	case WRPLL_REF_PCH_SSC:
		/*
		 * We could calculate spread here, but our checking
		 * code only cares about 5% accuracy, and spread is a max of
		 * 0.5% downspread.
		 */
		refclk = dev_priv->dpll.ref_clks.ssc;
		break;
	case WRPLL_REF_LCPLL:
		/* LCPLL reference is a fixed 2.7 GHz (value in kHz). */
		refclk = 2700000;
		break;
	default:
		MISSING_CASE(wrpll);
		return 0;
	}

	/* Extract the divider fields programmed by hsw_ddi_wrpll_get_dpll(). */
	r = wrpll & WRPLL_DIVIDER_REF_MASK;
	p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
	n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;

	/* Convert to KHz, p & r have a fixed point portion */
	return (refclk * n / 10) / (p * r) * 2;
}
936 
937 static struct intel_shared_dpll *
938 hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
939 {
940 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
941 	struct intel_shared_dpll *pll;
942 	enum intel_dpll_id pll_id;
943 	int clock = crtc_state->port_clock;
944 
945 	switch (clock / 2) {
946 	case 81000:
947 		pll_id = DPLL_ID_LCPLL_810;
948 		break;
949 	case 135000:
950 		pll_id = DPLL_ID_LCPLL_1350;
951 		break;
952 	case 270000:
953 		pll_id = DPLL_ID_LCPLL_2700;
954 		break;
955 	default:
956 		drm_dbg_kms(&dev_priv->drm, "Invalid clock for DP: %d\n",
957 			    clock);
958 		return NULL;
959 	}
960 
961 	pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
962 
963 	if (!pll)
964 		return NULL;
965 
966 	return pll;
967 }
968 
969 static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
970 				  const struct intel_shared_dpll *pll,
971 				  const struct intel_dpll_hw_state *pll_state)
972 {
973 	int link_clock = 0;
974 
975 	switch (pll->info->id) {
976 	case DPLL_ID_LCPLL_810:
977 		link_clock = 81000;
978 		break;
979 	case DPLL_ID_LCPLL_1350:
980 		link_clock = 135000;
981 		break;
982 	case DPLL_ID_LCPLL_2700:
983 		link_clock = 270000;
984 		break;
985 	default:
986 		drm_WARN(&i915->drm, 1, "bad port clock sel\n");
987 		break;
988 	}
989 
990 	return link_clock * 2;
991 }
992 
993 static struct intel_shared_dpll *
994 hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
995 		      struct intel_crtc *crtc)
996 {
997 	struct intel_crtc_state *crtc_state =
998 		intel_atomic_get_new_crtc_state(state, crtc);
999 
1000 	if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
1001 		return NULL;
1002 
1003 	crtc_state->dpll_hw_state.spll = SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz |
1004 					 SPLL_REF_MUXED_SSC;
1005 
1006 	return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
1007 				      BIT(DPLL_ID_SPLL));
1008 }
1009 
1010 static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
1011 				 const struct intel_shared_dpll *pll,
1012 				 const struct intel_dpll_hw_state *pll_state)
1013 {
1014 	int link_clock = 0;
1015 
1016 	switch (pll_state->spll & SPLL_FREQ_MASK) {
1017 	case SPLL_FREQ_810MHz:
1018 		link_clock = 81000;
1019 		break;
1020 	case SPLL_FREQ_1350MHz:
1021 		link_clock = 135000;
1022 		break;
1023 	case SPLL_FREQ_2700MHz:
1024 		link_clock = 270000;
1025 		break;
1026 	default:
1027 		drm_WARN(&i915->drm, 1, "bad spll freq\n");
1028 		break;
1029 	}
1030 
1031 	return link_clock * 2;
1032 }
1033 
/*
 * Select and reserve a DPLL for the crtc on HSW/BDW: WRPLL for HDMI,
 * LCPLL for DP, SPLL for analog. On success the chosen PLL is referenced
 * and stored in crtc_state->shared_dpll. Returns false for unsupported
 * output types or if no suitable PLL could be found.
 */
static bool hsw_get_dpll(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_shared_dpll *pll;

	/* start from a clean slate; the helpers below fill in the hw state */
	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		pll = hsw_ddi_wrpll_get_dpll(state, crtc);
	else if (intel_crtc_has_dp_encoder(crtc_state))
		pll = hsw_ddi_lcpll_get_dpll(crtc_state);
	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
		pll = hsw_ddi_spll_get_dpll(state, crtc);
	else
		return false;

	if (!pll)
		return false;

	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return true;
}
1064 
1065 static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
1066 {
1067 	i915->dpll.ref_clks.ssc = 135000;
1068 	/* Non-SSC is only used on non-ULT HSW. */
1069 	if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
1070 		i915->dpll.ref_clks.nssc = 24000;
1071 	else
1072 		i915->dpll.ref_clks.nssc = 135000;
1073 }
1074 
/* Log the raw HSW PLL hw state (WRPLL and SPLL register values) for debug. */
static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
		    hw_state->wrpll, hw_state->spll);
}
1081 
/* PLL ops for the HSW WRPLLs (used for HDMI outputs, see hsw_get_dpll()) */
static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
	.enable = hsw_ddi_wrpll_enable,
	.disable = hsw_ddi_wrpll_disable,
	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
	.get_freq = hsw_ddi_wrpll_get_freq,
};
1088 
/* PLL ops for the HSW SPLL (used for analog outputs, see hsw_get_dpll()) */
static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
	.enable = hsw_ddi_spll_enable,
	.disable = hsw_ddi_spll_disable,
	.get_hw_state = hsw_ddi_spll_get_hw_state,
	.get_freq = hsw_ddi_spll_get_freq,
};
1095 
/*
 * The HSW LCPLLs are fixed-frequency and flagged INTEL_DPLL_ALWAYS_ON in
 * hsw_plls[], so enabling is a no-op.
 */
static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
}
1100 
/* Counterpart of hsw_ddi_lcpll_enable(): always-on PLL, nothing to do. */
static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}
1105 
/*
 * The always-on LCPLLs have no per-PLL state to read back; report them
 * as enabled unconditionally and leave hw_state untouched.
 */
static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	return true;
}
1112 
/* PLL ops for the always-on HSW LCPLLs; enable/disable are no-ops. */
static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
	.enable = hsw_ddi_lcpll_enable,
	.disable = hsw_ddi_lcpll_disable,
	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
	.get_freq = hsw_ddi_lcpll_get_freq,
};
1119 
/* HSW/BDW PLL table; the fixed-rate LCPLLs are marked always-on. */
static const struct dpll_info hsw_plls[] = {
	{ "WRPLL 1",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1,     0 },
	{ "WRPLL 2",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2,     0 },
	{ "SPLL",       &hsw_ddi_spll_funcs,  DPLL_ID_SPLL,       0 },
	{ "LCPLL 810",  &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810,  INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
	{ },
};
1129 
/* Shared-DPLL manager callbacks for HSW/BDW. */
static const struct intel_dpll_mgr hsw_pll_mgr = {
	.dpll_info = hsw_plls,
	.get_dplls = hsw_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = hsw_update_dpll_ref_clks,
	.dump_hw_state = hsw_dump_hw_state,
};
1137 
/* Per-PLL control and configuration registers on SKL. */
struct skl_dpll_regs {
	i915_reg_t ctl, cfgcr1, cfgcr2;
};
1141 
/* this array is indexed by the *shared* pll id */
static const struct skl_dpll_regs skl_dpll_regs[4] = {
	{
		/* DPLL 0 */
		.ctl = LCPLL1_CTL,
		/* DPLL 0 doesn't support HDMI mode, so no cfgcr1/cfgcr2 */
	},
	{
		/* DPLL 1 */
		.ctl = LCPLL2_CTL,
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
	},
	{
		/* DPLL 2 */
		.ctl = WRPLL_CTL(0),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
	},
	{
		/* DPLL 3 */
		.ctl = WRPLL_CTL(1),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
	},
};
1168 
/*
 * Update this PLL's field in the shared DPLL_CTRL1 register (HDMI mode,
 * SSC and link rate) from the cached hw state, leaving the other PLLs'
 * fields untouched.
 */
static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
				    struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	val = intel_de_read(dev_priv, DPLL_CTRL1);

	/* each PLL owns a 6-bit field in DPLL_CTRL1, hence the id * 6 shift */
	val &= ~(DPLL_CTRL1_HDMI_MODE(id) |
		 DPLL_CTRL1_SSC(id) |
		 DPLL_CTRL1_LINK_RATE_MASK(id));
	val |= pll->state.hw_state.ctrl1 << (id * 6);

	intel_de_write(dev_priv, DPLL_CTRL1, val);
	intel_de_posting_read(dev_priv, DPLL_CTRL1);
}
1185 
/*
 * Enable a SKL DPLL (1-3): program DPLL_CTRL1 and cfgcr1/2 from the cached
 * state, set the enable bit and wait for the PLL to report lock.
 */
static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	skl_ddi_pll_write_ctrl1(dev_priv, pll);

	intel_de_write(dev_priv, regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
	intel_de_write(dev_priv, regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
	intel_de_posting_read(dev_priv, regs[id].cfgcr1);
	intel_de_posting_read(dev_priv, regs[id].cfgcr2);

	/* the enable bit is always bit 31 */
	intel_de_write(dev_priv, regs[id].ctl,
		       intel_de_read(dev_priv, regs[id].ctl) | LCPLL_PLL_ENABLE);

	if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
		drm_err(&dev_priv->drm, "DPLL %d not locked\n", id);
}
1206 
/*
 * DPLL0 is always enabled (it drives CDCLK, see skl_ddi_dpll0_get_hw_state());
 * only its DPLL_CTRL1 field needs programming here.
 */
static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	skl_ddi_pll_write_ctrl1(dev_priv, pll);
}
1212 
/* Disable a SKL DPLL (1-3) by clearing the enable bit in its control reg. */
static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	/* the enable bit is always bit 31 */
	intel_de_write(dev_priv, regs[id].ctl,
		       intel_de_read(dev_priv, regs[id].ctl) & ~LCPLL_PLL_ENABLE);
	intel_de_posting_read(dev_priv, regs[id].ctl);
}
1224 
/*
 * DPLL0 drives CDCLK and is always enabled (see skl_ddi_dpll0_get_hw_state()),
 * so disabling is intentionally a no-op.
 */
static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}
1229 
/*
 * Read back the PLL state from hardware into hw_state. Returns false if
 * display power is down or the PLL is disabled. cfgcr1/2 are only read in
 * HDMI mode since their contents are stale otherwise.
 */
static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
{
	u32 val;
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, regs[id].ctl);
	if (!(val & LCPLL_PLL_ENABLE))
		goto out;

	/* extract this PLL's 6-bit field from the shared DPLL_CTRL1 */
	val = intel_de_read(dev_priv, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	/* avoid reading back stale values if HDMI mode is not enabled */
	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
		hw_state->cfgcr1 = intel_de_read(dev_priv, regs[id].cfgcr1);
		hw_state->cfgcr2 = intel_de_read(dev_priv, regs[id].cfgcr2);
	}
	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1266 
/*
 * Read back DPLL0's DPLL_CTRL1 field. DPLL0 is always enabled since it
 * drives CDCLK, so finding it disabled is a WARN condition.
 */
static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	/* DPLL0 is always enabled since it drives CDCLK */
	val = intel_de_read(dev_priv, regs[id].ctl);
	if (drm_WARN_ON(&dev_priv->drm, !(val & LCPLL_PLL_ENABLE)))
		goto out;

	/* extract this PLL's 6-bit field from the shared DPLL_CTRL1 */
	val = intel_de_read(dev_priv, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1299 
/* Best WRPLL divider candidate found so far by skl_ddi_calculate_wrpll(). */
struct skl_wrpll_context {
	u64 min_deviation;		/* current minimal deviation */
	u64 central_freq;		/* chosen central freq */
	u64 dco_freq;			/* chosen dco freq */
	unsigned int p;			/* chosen divider */
};
1306 
1307 static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
1308 {
1309 	memset(ctx, 0, sizeof(*ctx));
1310 
1311 	ctx->min_deviation = U64_MAX;
1312 }
1313 
/* DCO freq must be within +1%/-6% of the DCO central freq */
#define SKL_DCO_MAX_PDEVIATION	100	/* +1% in units of 0.01% */
#define SKL_DCO_MAX_NDEVIATION	600	/* -6% in units of 0.01% */
1317 
1318 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1319 				  u64 central_freq,
1320 				  u64 dco_freq,
1321 				  unsigned int divider)
1322 {
1323 	u64 deviation;
1324 
1325 	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1326 			      central_freq);
1327 
1328 	/* positive deviation */
1329 	if (dco_freq >= central_freq) {
1330 		if (deviation < SKL_DCO_MAX_PDEVIATION &&
1331 		    deviation < ctx->min_deviation) {
1332 			ctx->min_deviation = deviation;
1333 			ctx->central_freq = central_freq;
1334 			ctx->dco_freq = dco_freq;
1335 			ctx->p = divider;
1336 		}
1337 	/* negative deviation */
1338 	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1339 		   deviation < ctx->min_deviation) {
1340 		ctx->min_deviation = deviation;
1341 		ctx->central_freq = central_freq;
1342 		ctx->dco_freq = dco_freq;
1343 		ctx->p = divider;
1344 	}
1345 }
1346 
/*
 * Split the overall divider @p into the three WRPLL post-divider stages
 * p0 (pdiv), p1 (qdiv) and p2 (kdiv) such that p = p0 * p1 * p2.
 * Unsupported values of @p leave the outputs untouched.
 */
static void skl_wrpll_get_multipliers(unsigned int p,
				      unsigned int *p0 /* out */,
				      unsigned int *p1 /* out */,
				      unsigned int *p2 /* out */)
{
	if (p % 2 == 0) {
		unsigned int half = p / 2;

		/* small halves go straight into the kdiv stage */
		switch (half) {
		case 1:
		case 2:
		case 3:
		case 5:
			*p0 = 2;
			*p1 = 1;
			*p2 = half;
			return;
		}

		/* otherwise factor the half through the qdiv stage */
		if (half % 2 == 0) {
			*p0 = 2;
			*p1 = half / 2;
			*p2 = 2;
		} else if (half % 3 == 0) {
			*p0 = 3;
			*p1 = half / 3;
			*p2 = 2;
		} else if (half % 7 == 0) {
			*p0 = 7;
			*p1 = half / 7;
			*p2 = 2;
		}
		return;
	}

	/* odd dividers: 3, 5, 7, 9, 15, 21, 35 */
	switch (p) {
	case 3:
	case 9:
		*p0 = 3;
		*p1 = 1;
		*p2 = p / 3;
		break;
	case 5:
	case 7:
		*p0 = p;
		*p1 = 1;
		*p2 = 1;
		break;
	case 15:
		*p0 = 3;
		*p1 = 1;
		*p2 = 5;
		break;
	case 21:
		*p0 = 7;
		*p1 = 1;
		*p2 = 3;
		break;
	case 35:
		*p0 = 7;
		*p1 = 1;
		*p2 = 5;
		break;
	}
}
1395 
/* Register field values for DPLL_CFGCR1/2, filled by skl_wrpll_params_populate(). */
struct skl_wrpll_params {
	u32 dco_fraction;
	u32 dco_integer;
	u32 qdiv_ratio;
	u32 qdiv_mode;
	u32 kdiv;
	u32 pdiv;
	u32 central_freq;
};
1405 
/*
 * Translate the chosen divider triplet (p0/p1/p2 -> pdiv/qdiv/kdiv) and DCO
 * central frequency into the DPLL_CFGCR1/2 register field encodings.
 * afe_clock and central_freq are in Hz; ref_clock is in kHz (matches
 * i915->dpll.ref_clks) — TODO confirm against callers.
 */
static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
				      u64 afe_clock,
				      int ref_clock,
				      u64 central_freq,
				      u32 p0, u32 p1, u32 p2)
{
	u64 dco_freq;

	/* encode the central freq; callers only pass these three values */
	switch (central_freq) {
	case 9600000000ULL:
		params->central_freq = 0;
		break;
	case 9000000000ULL:
		params->central_freq = 1;
		break;
	case 8400000000ULL:
		params->central_freq = 3;
	}

	switch (p0) {
	case 1:
		params->pdiv = 0;
		break;
	case 2:
		params->pdiv = 1;
		break;
	case 3:
		params->pdiv = 2;
		break;
	case 7:
		params->pdiv = 4;
		break;
	default:
		WARN(1, "Incorrect PDiv\n");
	}

	switch (p2) {
	case 5:
		params->kdiv = 0;
		break;
	case 2:
		params->kdiv = 1;
		break;
	case 3:
		params->kdiv = 2;
		break;
	case 1:
		params->kdiv = 3;
		break;
	default:
		WARN(1, "Incorrect KDiv\n");
	}

	/* qdiv is bypassed (mode 0) when the ratio is 1 */
	params->qdiv_ratio = p1;
	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;

	dco_freq = p0 * p1 * p2 * afe_clock;

	/*
	 * Intermediate values are in Hz.
	 * Divide by MHz to match bspec
	 */
	params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
	params->dco_fraction =
		div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
}
1473 
/*
 * Find WRPLL dividers for the given pixel clock (in Hz): try every even
 * (preferred) and odd divider against the three DCO central frequencies,
 * keep the candidate with minimal deviation, then translate the result
 * into register fields via skl_wrpll_params_populate(). Returns false if
 * no divider satisfies the +1%/-6% deviation constraint.
 */
static bool
skl_ddi_calculate_wrpll(int clock /* in Hz */,
			int ref_clock,
			struct skl_wrpll_params *wrpll_params)
{
	u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
	u64 dco_central_freq[3] = { 8400000000ULL,
				    9000000000ULL,
				    9600000000ULL };
	static const int even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
					     24, 28, 30, 32, 36, 40, 42, 44,
					     48, 52, 54, 56, 60, 64, 66, 68,
					     70, 72, 76, 78, 80, 84, 88, 90,
					     92, 96, 98 };
	static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
	static const struct {
		const int *list;
		int n_dividers;
	} dividers[] = {
		{ even_dividers, ARRAY_SIZE(even_dividers) },
		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
	};
	struct skl_wrpll_context ctx;
	unsigned int dco, d, i;
	unsigned int p0, p1, p2;

	skl_wrpll_context_init(&ctx);

	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
			for (i = 0; i < dividers[d].n_dividers; i++) {
				unsigned int p = dividers[d].list[i];
				u64 dco_freq = p * afe_clock;

				skl_wrpll_try_divider(&ctx,
						      dco_central_freq[dco],
						      dco_freq,
						      p);
				/*
				 * Skip the remaining dividers if we're sure to
				 * have found the definitive divider, we can't
				 * improve a 0 deviation.
				 */
				if (ctx.min_deviation == 0)
					goto skip_remaining_dividers;
			}
		}

skip_remaining_dividers:
		/*
		 * If a solution is found with an even divider, prefer
		 * this one.
		 */
		if (d == 0 && ctx.p)
			break;
	}

	if (!ctx.p) {
		DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
		return false;
	}

	/*
	 * gcc incorrectly analyses that these can be used without being
	 * initialized. To be fair, it's hard to guess.
	 */
	p0 = p1 = p2 = 0;
	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
	skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
				  ctx.central_freq, p0, p1, p2);

	return true;
}
1547 
/*
 * Compute the WRPLL configuration for an HDMI port clock and store the
 * ctrl1/cfgcr1/cfgcr2 values in crtc_state->dpll_hw_state. Returns false
 * if no valid divider exists for this clock.
 */
static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	u32 ctrl1, cfgcr1, cfgcr2;
	struct skl_wrpll_params wrpll_params = { 0, };

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);

	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);

	/* port_clock is in kHz, the calculator wants Hz */
	if (!skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
				     i915->dpll.ref_clks.nssc,
				     &wrpll_params))
		return false;

	cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
		wrpll_params.dco_integer;

	cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
		wrpll_params.central_freq;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
	return true;
}
1585 
/*
 * Reconstruct the port clock (kHz) from the WRPLL cfgcr1/2 register values:
 * decode the p0/p1/p2 dividers, rebuild the DCO frequency from its integer
 * and 15-bit fractional parts, then divide out the dividers and the 5x AFE
 * factor.
 */
static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
{
	int ref_clock = i915->dpll.ref_clks.nssc;
	u32 p0, p1, p2, dco_freq;

	p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
	p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;

	/* qdiv ratio only applies when qdiv mode is enabled */
	if (pll_state->cfgcr2 &  DPLL_CFGCR2_QDIV_MODE(1))
		p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
	else
		p1 = 1;


	switch (p0) {
	case DPLL_CFGCR2_PDIV_1:
		p0 = 1;
		break;
	case DPLL_CFGCR2_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR2_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR2_PDIV_7_INVALID:
		/*
		 * Incorrect ASUS-Z170M BIOS setting, the HW seems to ignore bit#0,
		 * handling it the same way as PDIV_7.
		 */
		drm_dbg_kms(&i915->drm, "Invalid WRPLL PDIV divider value, fixing it.\n");
		fallthrough;
	case DPLL_CFGCR2_PDIV_7:
		p0 = 7;
		break;
	default:
		MISSING_CASE(p0);
		return 0;
	}

	switch (p2) {
	case DPLL_CFGCR2_KDIV_5:
		p2 = 5;
		break;
	case DPLL_CFGCR2_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR2_KDIV_3:
		p2 = 3;
		break;
	case DPLL_CFGCR2_KDIV_1:
		p2 = 1;
		break;
	default:
		MISSING_CASE(p2);
		return 0;
	}

	dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
		   ref_clock;

	/* add the 15-bit fractional DCO contribution */
	dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
		    ref_clock / 0x8000;

	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	return dco_freq / (p0 * p1 * p2 * 5);
}
1656 
/*
 * Fill dpll_hw_state.ctrl1 with the DP link rate matching the crtc's port
 * clock. NOTE(review): an unhandled port clock falls through the switch
 * without setting a link rate field yet still returns true — presumably
 * callers only pass the rates listed here; verify against callers.
 */
static bool
skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
	u32 ctrl1;

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
	switch (crtc_state->port_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
		break;
	case 135000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
		break;
	case 270000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
		break;
		/* eDP 1.4 rates */
	case 162000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
		break;
	case 108000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
		break;
	case 216000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
		break;
	}

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	crtc_state->dpll_hw_state.ctrl1 = ctrl1;

	return true;
}
1696 
1697 static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1698 				  const struct intel_shared_dpll *pll,
1699 				  const struct intel_dpll_hw_state *pll_state)
1700 {
1701 	int link_clock = 0;
1702 
1703 	switch ((pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
1704 		DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
1705 	case DPLL_CTRL1_LINK_RATE_810:
1706 		link_clock = 81000;
1707 		break;
1708 	case DPLL_CTRL1_LINK_RATE_1080:
1709 		link_clock = 108000;
1710 		break;
1711 	case DPLL_CTRL1_LINK_RATE_1350:
1712 		link_clock = 135000;
1713 		break;
1714 	case DPLL_CTRL1_LINK_RATE_1620:
1715 		link_clock = 162000;
1716 		break;
1717 	case DPLL_CTRL1_LINK_RATE_2160:
1718 		link_clock = 216000;
1719 		break;
1720 	case DPLL_CTRL1_LINK_RATE_2700:
1721 		link_clock = 270000;
1722 		break;
1723 	default:
1724 		drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
1725 		break;
1726 	}
1727 
1728 	return link_clock * 2;
1729 }
1730 
/*
 * Compute the PLL state for the crtc (WRPLL dividers for HDMI, link rate
 * for DP) and reserve a matching shared DPLL: eDP must use DPLL0, all
 * other outputs pick from DPLL1-3.
 */
static bool skl_get_dpll(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	bool bret;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
		bret = skl_ddi_hdmi_pll_dividers(crtc_state);
		if (!bret) {
			drm_dbg_kms(&i915->drm,
				    "Could not get HDMI pll dividers.\n");
			return false;
		}
	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
		bret = skl_ddi_dp_set_dpll_hw_state(crtc_state);
		if (!bret) {
			drm_dbg_kms(&i915->drm,
				    "Could not set DP dpll HW state.\n");
			return false;
		}
	} else {
		return false;
	}

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
		pll = intel_find_shared_dpll(state, crtc,
					     &crtc_state->dpll_hw_state,
					     BIT(DPLL_ID_SKL_DPLL0));
	else
		pll = intel_find_shared_dpll(state, crtc,
					     &crtc_state->dpll_hw_state,
					     BIT(DPLL_ID_SKL_DPLL3) |
					     BIT(DPLL_ID_SKL_DPLL2) |
					     BIT(DPLL_ID_SKL_DPLL1));
	if (!pll)
		return false;

	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return true;
}
1779 
1780 static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
1781 				const struct intel_shared_dpll *pll,
1782 				const struct intel_dpll_hw_state *pll_state)
1783 {
1784 	/*
1785 	 * ctrl1 register is already shifted for each pll, just use 0 to get
1786 	 * the internal shift for each field
1787 	 */
1788 	if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
1789 		return skl_ddi_wrpll_get_freq(i915, pll, pll_state);
1790 	else
1791 		return skl_ddi_lcpll_get_freq(i915, pll, pll_state);
1792 }
1793 
/* SKL DPLLs use the CDCLK reference clock; there is no SSC reference. */
static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC ref */
	i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
}
1799 
/* Log the raw SKL PLL hw state (ctrl1/cfgcr1/cfgcr2) for debug. */
static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
		      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
		      hw_state->ctrl1,
		      hw_state->cfgcr1,
		      hw_state->cfgcr2);
}
1809 
/* PLL ops for SKL DPLL1-3 (fully software-managed PLLs). */
static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
	.enable = skl_ddi_pll_enable,
	.disable = skl_ddi_pll_disable,
	.get_hw_state = skl_ddi_pll_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};
1816 
/* PLL ops for SKL DPLL0, which stays enabled since it drives CDCLK. */
static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
	.enable = skl_ddi_dpll0_enable,
	.disable = skl_ddi_dpll0_disable,
	.get_hw_state = skl_ddi_dpll0_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};
1823 
/* SKL PLL table; DPLL0 is always-on because it drives CDCLK. */
static const struct dpll_info skl_plls[] = {
	{ "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
	{ "DPLL 1", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL1, 0 },
	{ "DPLL 2", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL2, 0 },
	{ "DPLL 3", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL3, 0 },
	{ },
};
1831 
/* Shared-DPLL manager callbacks for SKL. */
static const struct intel_dpll_mgr skl_pll_mgr = {
	.dpll_info = skl_plls,
	.get_dplls = skl_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = skl_update_dpll_ref_clks,
	.dump_hw_state = skl_dump_hw_state,
};
1839 
/*
 * Enable a BXT/GLK port PLL: select the non-SSC reference, power the PLL
 * up (GLK only), program all divider/coefficient registers from the cached
 * hw state, trigger a recalibration, then set the enable bit and wait for
 * lock. The PLL id maps 1:1 to the DDI port.
 */
static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	u32 temp;
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	enum dpio_phy phy;
	enum dpio_channel ch;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	/* Non-SSC reference */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp |= PORT_PLL_REF_SEL;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
		temp |= PORT_PLL_POWER_ENABLE;
		intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

		if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
				 PORT_PLL_POWER_STATE), 200))
			drm_err(&dev_priv->drm,
				"Power state not set for PLL:%d\n", port);
	}

	/* Disable 10 bit clock */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Write P1 & P2 */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
	temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
	temp |= pll->state.hw_state.ebb0;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch), temp);

	/* Write M2 integer */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
	temp &= ~PORT_PLL_M2_MASK;
	temp |= pll->state.hw_state.pll0;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 0), temp);

	/* Write N */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
	temp &= ~PORT_PLL_N_MASK;
	temp |= pll->state.hw_state.pll1;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 1), temp);

	/* Write M2 fraction */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
	temp &= ~PORT_PLL_M2_FRAC_MASK;
	temp |= pll->state.hw_state.pll2;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 2), temp);

	/* Write M2 fraction enable */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
	temp &= ~PORT_PLL_M2_FRAC_ENABLE;
	temp |= pll->state.hw_state.pll3;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 3), temp);

	/* Write coeff */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
	temp &= ~PORT_PLL_PROP_COEFF_MASK;
	temp &= ~PORT_PLL_INT_COEFF_MASK;
	temp &= ~PORT_PLL_GAIN_CTL_MASK;
	temp |= pll->state.hw_state.pll6;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 6), temp);

	/* Write calibration val */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
	temp &= ~PORT_PLL_TARGET_CNT_MASK;
	temp |= pll->state.hw_state.pll8;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 8), temp);

	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
	temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
	temp |= pll->state.hw_state.pll9;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 9), temp);

	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
	temp &= ~PORT_PLL_DCO_AMP_MASK;
	temp |= pll->state.hw_state.pll10;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 10), temp);

	/* Recalibrate with new settings */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	temp |= PORT_PLL_RECALIBRATE;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	temp |= pll->state.hw_state.ebb4;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Enable PLL */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp |= PORT_PLL_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));

	if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
			200))
		drm_err(&dev_priv->drm, "PLL %d not locked\n", port);

	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_TX_DW5_LN0(phy, ch));
		temp |= DCC_DELAY_RANGE_2;
		intel_de_write(dev_priv, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
	}

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers and we pick lanes 0/1 for that.
	 */
	temp = intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN01(phy, ch));
	temp &= ~LANE_STAGGER_MASK;
	temp &= ~LANESTAGGER_STRAP_OVRD;
	temp |= pll->state.hw_state.pcsdw12;
	intel_de_write(dev_priv, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
}
1960 
/*
 * Disable a BXT/GLK port PLL; on GLK additionally power the PLL down and
 * wait for the power state to clear.
 */
static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
					struct intel_shared_dpll *pll)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	u32 temp;

	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp &= ~PORT_PLL_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));

	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
		temp &= ~PORT_PLL_POWER_ENABLE;
		intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

		if (wait_for_us(!(intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
				  PORT_PLL_POWER_STATE), 200))
			drm_err(&dev_priv->drm,
				"Power state not reset for PLL:%d\n", port);
	}
}
1983 
/*
 * Read back the port PLL configuration from hardware into hw_state,
 * masking each register down to the bits this driver programs. Returns
 * false if display power is down or the PLL is disabled.
 */
static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
					struct intel_shared_dpll *pll,
					struct intel_dpll_hw_state *hw_state)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	intel_wakeref_t wakeref;
	enum dpio_phy phy;
	enum dpio_channel ch;
	u32 val;
	bool ret;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	if (!(val & PORT_PLL_ENABLE))
		goto out;

	hw_state->ebb0 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;

	hw_state->ebb4 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;

	hw_state->pll0 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
	hw_state->pll0 &= PORT_PLL_M2_MASK;

	hw_state->pll1 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
	hw_state->pll1 &= PORT_PLL_N_MASK;

	hw_state->pll2 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;

	hw_state->pll3 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;

	hw_state->pll6 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
			  PORT_PLL_INT_COEFF_MASK |
			  PORT_PLL_GAIN_CTL_MASK;

	hw_state->pll8 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;

	hw_state->pll9 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;

	hw_state->pll10 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
			   PORT_PLL_DCO_AMP_MASK;

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers. We configure all lanes the same way, so
	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
	 */
	hw_state->pcsdw12 = intel_de_read(dev_priv,
					  BXT_PORT_PCS_DW12_LN01(phy, ch));
	if (intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
		drm_dbg(&dev_priv->drm,
			"lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
			hw_state->pcsdw12,
			intel_de_read(dev_priv,
				      BXT_PORT_PCS_DW12_LN23(phy, ch)));
	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
2063 
/* bxt clock parameters: PLL divider set plus the resulting VCO frequency */
struct bxt_clk_div {
	int clock;	/* port clock in kHz this divider set is for */
	u32 p1;		/* post divider 1 */
	u32 p2;		/* post divider 2 */
	u32 m2_int;	/* feedback divider M2, integer part */
	u32 m2_frac;	/* feedback divider M2, 22-bit fractional part */
	bool m2_frac_en; /* whether the M2 fraction is used */
	u32 n;		/* reference divider */

	int vco;	/* resulting VCO frequency in kHz */
};
2076 
/* pre-calculated values for DP linkrates */
static const struct bxt_clk_div bxt_dp_clk_val[] = {
	/* {clock, p1, p2, m2_int, m2_frac, m2_frac_en, n}; .vco is derived later */
	{162000, 4, 2, 32, 1677722, 1, 1},
	{270000, 4, 1, 27,       0, 0, 1},
	{540000, 2, 1, 27,       0, 0, 1},
	{216000, 3, 2, 32, 1677722, 1, 1},
	{243000, 4, 1, 24, 1258291, 1, 1},
	{324000, 4, 1, 32, 1677722, 1, 1},
	{432000, 3, 1, 32, 1677722, 1, 1}
};
2087 
2088 static bool
2089 bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
2090 			  struct bxt_clk_div *clk_div)
2091 {
2092 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2093 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2094 	struct dpll best_clock;
2095 
2096 	/* Calculate HDMI div */
2097 	/*
2098 	 * FIXME: tie the following calculation into
2099 	 * i9xx_crtc_compute_clock
2100 	 */
2101 	if (!bxt_find_best_dpll(crtc_state, &best_clock)) {
2102 		drm_dbg(&i915->drm, "no PLL dividers found for clock %d pipe %c\n",
2103 			crtc_state->port_clock,
2104 			pipe_name(crtc->pipe));
2105 		return false;
2106 	}
2107 
2108 	clk_div->p1 = best_clock.p1;
2109 	clk_div->p2 = best_clock.p2;
2110 	drm_WARN_ON(&i915->drm, best_clock.m1 != 2);
2111 	clk_div->n = best_clock.n;
2112 	clk_div->m2_int = best_clock.m2 >> 22;
2113 	clk_div->m2_frac = best_clock.m2 & ((1 << 22) - 1);
2114 	clk_div->m2_frac_en = clk_div->m2_frac != 0;
2115 
2116 	clk_div->vco = best_clock.vco;
2117 
2118 	return true;
2119 }
2120 
2121 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
2122 				    struct bxt_clk_div *clk_div)
2123 {
2124 	int clock = crtc_state->port_clock;
2125 	int i;
2126 
2127 	*clk_div = bxt_dp_clk_val[0];
2128 	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
2129 		if (bxt_dp_clk_val[i].clock == clock) {
2130 			*clk_div = bxt_dp_clk_val[i];
2131 			break;
2132 		}
2133 	}
2134 
2135 	clk_div->vco = clock * 10 / 2 * clk_div->p1 * clk_div->p2;
2136 }
2137 
/*
 * Translate the divider values in @clk_div into the BXT port PLL register
 * state in crtc_state->dpll_hw_state. Loop filter coefficients are chosen
 * from the target VCO frequency and the lane stagger value from the port
 * clock. Returns false for a VCO outside the supported ranges.
 */
static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
				      const struct bxt_clk_div *clk_div)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
	int clock = crtc_state->port_clock;
	int vco = clk_div->vco;
	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
	u32 lanestagger;

	memset(dpll_hw_state, 0, sizeof(*dpll_hw_state));

	/* loop filter coefficients per VCO range (kHz) */
	if (vco >= 6200000 && vco <= 6700000) {
		prop_coef = 4;
		int_coef = 9;
		gain_ctl = 3;
		targ_cnt = 8;
	} else if ((vco > 5400000 && vco < 6200000) ||
			(vco >= 4800000 && vco < 5400000)) {
		prop_coef = 5;
		int_coef = 11;
		gain_ctl = 3;
		targ_cnt = 9;
	} else if (vco == 5400000) {
		prop_coef = 3;
		int_coef = 8;
		gain_ctl = 1;
		targ_cnt = 9;
	} else {
		drm_err(&i915->drm, "Invalid VCO\n");
		return false;
	}

	/* lane stagger selected by port clock (kHz) */
	if (clock > 270000)
		lanestagger = 0x18;
	else if (clock > 135000)
		lanestagger = 0x0d;
	else if (clock > 67000)
		lanestagger = 0x07;
	else if (clock > 33000)
		lanestagger = 0x04;
	else
		lanestagger = 0x02;

	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
	dpll_hw_state->pll0 = clk_div->m2_int;
	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
	dpll_hw_state->pll2 = clk_div->m2_frac;

	/* M2 fraction is only programmed when actually used */
	if (clk_div->m2_frac_en)
		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;

	dpll_hw_state->pll6 = prop_coef | PORT_PLL_INT_COEFF(int_coef);
	dpll_hw_state->pll6 |= PORT_PLL_GAIN_CTL(gain_ctl);

	dpll_hw_state->pll8 = targ_cnt;

	dpll_hw_state->pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;

	dpll_hw_state->pll10 =
		PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
		| PORT_PLL_DCO_AMP_OVR_EN_H;

	dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;

	dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;

	return true;
}
2207 
2208 static bool
2209 bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2210 {
2211 	struct bxt_clk_div clk_div = {};
2212 
2213 	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
2214 
2215 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2216 }
2217 
2218 static bool
2219 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2220 {
2221 	struct bxt_clk_div clk_div = {};
2222 
2223 	bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
2224 
2225 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2226 }
2227 
2228 static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
2229 				const struct intel_shared_dpll *pll,
2230 				const struct intel_dpll_hw_state *pll_state)
2231 {
2232 	struct dpll clock;
2233 
2234 	clock.m1 = 2;
2235 	clock.m2 = (pll_state->pll0 & PORT_PLL_M2_MASK) << 22;
2236 	if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
2237 		clock.m2 |= pll_state->pll2 & PORT_PLL_M2_FRAC_MASK;
2238 	clock.n = (pll_state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT;
2239 	clock.p1 = (pll_state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT;
2240 	clock.p2 = (pll_state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT;
2241 
2242 	return chv_calc_dpll_params(i915->dpll.ref_clks.nssc, &clock);
2243 }
2244 
/*
 * Assign the PLL for the crtc. BXT has a fixed 1:1 mapping between DDI
 * ports and PLLs, so no sharing search is needed: compute the HDMI or DP
 * PLL state and take a reference on the port's pre-allocated PLL.
 */
static bool bxt_get_dpll(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum intel_dpll_id id;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
	    !bxt_ddi_hdmi_set_dpll_hw_state(crtc_state))
		return false;

	if (intel_crtc_has_dp_encoder(crtc_state) &&
	    !bxt_ddi_dp_set_dpll_hw_state(crtc_state))
		return false;

	/* 1:1 mapping between ports and PLLs */
	id = (enum intel_dpll_id) encoder->port;
	pll = intel_get_shared_dpll_by_id(dev_priv, id);

	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
		    crtc->base.base.id, crtc->base.name, pll->info->name);

	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return true;
}
2277 
/* BXT PLL reference clocks are a fixed 100 MHz (values in kHz). */
static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	i915->dpll.ref_clks.ssc = 100000;
	i915->dpll.ref_clks.nssc = 100000;
	/* DSI non-SSC ref 19.2MHz */
}
2284 
2285 static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
2286 			      const struct intel_dpll_hw_state *hw_state)
2287 {
2288 	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
2289 		    "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
2290 		    "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
2291 		    hw_state->ebb0,
2292 		    hw_state->ebb4,
2293 		    hw_state->pll0,
2294 		    hw_state->pll1,
2295 		    hw_state->pll2,
2296 		    hw_state->pll3,
2297 		    hw_state->pll6,
2298 		    hw_state->pll8,
2299 		    hw_state->pll9,
2300 		    hw_state->pll10,
2301 		    hw_state->pcsdw12);
2302 }
2303 
/* PLL hooks shared by all BXT port PLLs. */
static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
	.enable = bxt_ddi_pll_enable,
	.disable = bxt_ddi_pll_disable,
	.get_hw_state = bxt_ddi_pll_get_hw_state,
	.get_freq = bxt_ddi_pll_get_freq,
};
2310 
/* One PLL per DDI port A/B/C; the IDs reuse the SKL DPLL enum values. */
static const struct dpll_info bxt_plls[] = {
	{ "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
	{ "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
	{ "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
	{ },
};
2317 
/* BXT PLL manager: ties the PLL table to the get/put/dump hooks. */
static const struct intel_dpll_mgr bxt_pll_mgr = {
	.dpll_info = bxt_plls,
	.get_dplls = bxt_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = bxt_update_dpll_ref_clks,
	.dump_hw_state = bxt_dump_hw_state,
};
2325 
/*
 * Enable a CNL DPLL following the bspec sequence: power up the PLL,
 * program CFGCR0 (and CFGCR1 for HDMI mode), then enable the PLL and
 * wait for it to lock. DVFS and DDI clock mapping are handled in the
 * cdclk and DDI code paths respectively.
 */
static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	/* 1. Enable DPLL power in DPLL_ENABLE. */
	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
	val |= PLL_POWER_ENABLE;
	intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);

	/* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */
	if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id),
				  PLL_POWER_STATE, 5))
		drm_err(&dev_priv->drm, "PLL %d Power not enabled\n", id);

	/*
	 * 3. Configure DPLL_CFGCR0 to set SSC enable/disable,
	 * select DP mode, and set DP link rate.
	 */
	val = pll->state.hw_state.cfgcr0;
	intel_de_write(dev_priv, CNL_DPLL_CFGCR0(id), val);

	/* 4. Read back to ensure writes completed */
	intel_de_posting_read(dev_priv, CNL_DPLL_CFGCR0(id));

	/* 3. Configure DPLL_CFGCR1 */
	/* Avoid touching CFGCR1 if HDMI mode is not enabled */
	if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
		val = pll->state.hw_state.cfgcr1;
		intel_de_write(dev_priv, CNL_DPLL_CFGCR1(id), val);
		/* 4. Read back to ensure writes completed */
		intel_de_posting_read(dev_priv, CNL_DPLL_CFGCR1(id));
	}

	/*
	 * 5. If the frequency will result in a change to the voltage
	 * requirement, follow the Display Voltage Frequency Switching
	 * Sequence Before Frequency Change
	 *
	 * Note: DVFS is actually handled via the cdclk code paths,
	 * hence we do nothing here.
	 */

	/* 6. Enable DPLL in DPLL_ENABLE. */
	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
	val |= PLL_ENABLE;
	intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);

	/* 7. Wait for PLL lock status in DPLL_ENABLE. */
	if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
		drm_err(&dev_priv->drm, "PLL %d not locked\n", id);

	/*
	 * 8. If the frequency will result in a change to the voltage
	 * requirement, follow the Display Voltage Frequency Switching
	 * Sequence After Frequency Change
	 *
	 * Note: DVFS is actually handled via the cdclk code paths,
	 * hence we do nothing here.
	 */

	/*
	 * 9. turn on the clock for the DDI and map the DPLL to the DDI
	 * Done at intel_ddi_clk_select
	 */
}
2393 
/*
 * Disable a CNL DPLL following the bspec sequence: disable the PLL, wait
 * for it to unlock, then remove PLL power. DDI clock gating and DVFS are
 * handled in the DDI and cdclk code paths respectively.
 */
static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	/*
	 * 1. Configure DPCLKA_CFGCR0 to turn off the clock for the DDI.
	 * Done at intel_ddi_post_disable
	 */

	/*
	 * 2. If the frequency will result in a change to the voltage
	 * requirement, follow the Display Voltage Frequency Switching
	 * Sequence Before Frequency Change
	 *
	 * Note: DVFS is actually handled via the cdclk code paths,
	 * hence we do nothing here.
	 */

	/* 3. Disable DPLL through DPLL_ENABLE. */
	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
	val &= ~PLL_ENABLE;
	intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);

	/* 4. Wait for PLL not locked status in DPLL_ENABLE. */
	if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
		drm_err(&dev_priv->drm, "PLL %d locked\n", id);

	/*
	 * 5. If the frequency will result in a change to the voltage
	 * requirement, follow the Display Voltage Frequency Switching
	 * Sequence After Frequency Change
	 *
	 * Note: DVFS is actually handled via the cdclk code paths,
	 * hence we do nothing here.
	 */

	/* 6. Disable DPLL power in DPLL_ENABLE. */
	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
	val &= ~PLL_POWER_ENABLE;
	intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);

	/* 7. Wait for DPLL power state disabled in DPLL_ENABLE. */
	if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id),
				    PLL_POWER_STATE, 5))
		drm_err(&dev_priv->drm, "PLL %d Power not disabled\n", id);
}
2442 
/*
 * Read out CFGCR0 (and CFGCR1 when HDMI mode is set) for the given CNL
 * DPLL into @hw_state. Returns false if the PLL is disabled or the
 * display core power domain could not be acquired.
 */
static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
	if (!(val & PLL_ENABLE))
		goto out;

	val = intel_de_read(dev_priv, CNL_DPLL_CFGCR0(id));
	hw_state->cfgcr0 = val;

	/* avoid reading back stale values if HDMI mode is not enabled */
	if (val & DPLL_CFGCR0_HDMI_MODE) {
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 CNL_DPLL_CFGCR1(id));
	}
	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
2478 
/*
 * Split an overall DCO divider into the P (pdiv), Q (qdiv) and K (kdiv)
 * multipliers. Dividers not matched by any rule leave the outputs
 * untouched; the caller pre-initializes them to 0.
 */
static void cnl_wrpll_get_multipliers(int bestdiv, int *pdiv,
				      int *qdiv, int *kdiv)
{
	if (bestdiv % 2 != 0) {
		/* odd dividers: 3/5/7 map directly, 9/15/21 use K = 3 */
		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
			*pdiv = bestdiv;
			*kdiv = 1;
		} else {
			*pdiv = bestdiv / 3;
			*kdiv = 3;
		}
		*qdiv = 1;
		return;
	}

	/*
	 * even dividers; the check order matters, e.g. 20 must resolve to
	 * P=2/Q=5 via the %4 rule, not P=5/Q=2 via the %5 rule
	 */
	if (bestdiv == 2) {
		*pdiv = 2;
		*qdiv = 1;
		*kdiv = 1;
	} else if (bestdiv % 4 == 0) {
		*pdiv = 2;
		*qdiv = bestdiv / 4;
		*kdiv = 2;
	} else if (bestdiv % 6 == 0) {
		*pdiv = 3;
		*qdiv = bestdiv / 6;
		*kdiv = 2;
	} else if (bestdiv % 5 == 0) {
		*pdiv = 5;
		*qdiv = bestdiv / 10;
		*kdiv = 2;
	} else if (bestdiv % 14 == 0) {
		*pdiv = 7;
		*qdiv = bestdiv / 14;
		*kdiv = 2;
	}
}
2517 
2518 static void cnl_wrpll_params_populate(struct skl_wrpll_params *params,
2519 				      u32 dco_freq, u32 ref_freq,
2520 				      int pdiv, int qdiv, int kdiv)
2521 {
2522 	u32 dco;
2523 
2524 	switch (kdiv) {
2525 	case 1:
2526 		params->kdiv = 1;
2527 		break;
2528 	case 2:
2529 		params->kdiv = 2;
2530 		break;
2531 	case 3:
2532 		params->kdiv = 4;
2533 		break;
2534 	default:
2535 		WARN(1, "Incorrect KDiv\n");
2536 	}
2537 
2538 	switch (pdiv) {
2539 	case 2:
2540 		params->pdiv = 1;
2541 		break;
2542 	case 3:
2543 		params->pdiv = 2;
2544 		break;
2545 	case 5:
2546 		params->pdiv = 4;
2547 		break;
2548 	case 7:
2549 		params->pdiv = 8;
2550 		break;
2551 	default:
2552 		WARN(1, "Incorrect PDiv\n");
2553 	}
2554 
2555 	WARN_ON(kdiv != 2 && qdiv != 1);
2556 
2557 	params->qdiv_ratio = qdiv;
2558 	params->qdiv_mode = (qdiv == 1) ? 0 : 1;
2559 
2560 	dco = div_u64((u64)dco_freq << 15, ref_freq);
2561 
2562 	params->dco_integer = dco >> 15;
2563 	params->dco_fraction = dco & 0x7fff;
2564 }
2565 
2566 static bool
2567 __cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
2568 			  struct skl_wrpll_params *wrpll_params,
2569 			  int ref_clock)
2570 {
2571 	u32 afe_clock = crtc_state->port_clock * 5;
2572 	u32 dco_min = 7998000;
2573 	u32 dco_max = 10000000;
2574 	u32 dco_mid = (dco_min + dco_max) / 2;
2575 	static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
2576 					 18, 20, 24, 28, 30, 32,  36,  40,
2577 					 42, 44, 48, 50, 52, 54,  56,  60,
2578 					 64, 66, 68, 70, 72, 76,  78,  80,
2579 					 84, 88, 90, 92, 96, 98, 100, 102,
2580 					  3,  5,  7,  9, 15, 21 };
2581 	u32 dco, best_dco = 0, dco_centrality = 0;
2582 	u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2583 	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2584 
2585 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2586 		dco = afe_clock * dividers[d];
2587 
2588 		if ((dco <= dco_max) && (dco >= dco_min)) {
2589 			dco_centrality = abs(dco - dco_mid);
2590 
2591 			if (dco_centrality < best_dco_centrality) {
2592 				best_dco_centrality = dco_centrality;
2593 				best_div = dividers[d];
2594 				best_dco = dco;
2595 			}
2596 		}
2597 	}
2598 
2599 	if (best_div == 0)
2600 		return false;
2601 
2602 	cnl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2603 	cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
2604 				  pdiv, qdiv, kdiv);
2605 
2606 	return true;
2607 }
2608 
2609 static bool
2610 cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
2611 			struct skl_wrpll_params *wrpll_params)
2612 {
2613 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2614 
2615 	return __cnl_ddi_calculate_wrpll(crtc_state, wrpll_params,
2616 					 i915->dpll.ref_clks.nssc);
2617 }
2618 
2619 static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
2620 {
2621 	u32 cfgcr0, cfgcr1;
2622 	struct skl_wrpll_params wrpll_params = { 0, };
2623 
2624 	cfgcr0 = DPLL_CFGCR0_HDMI_MODE;
2625 
2626 	if (!cnl_ddi_calculate_wrpll(crtc_state, &wrpll_params))
2627 		return false;
2628 
2629 	cfgcr0 |= DPLL_CFGCR0_DCO_FRACTION(wrpll_params.dco_fraction) |
2630 		wrpll_params.dco_integer;
2631 
2632 	cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(wrpll_params.qdiv_ratio) |
2633 		DPLL_CFGCR1_QDIV_MODE(wrpll_params.qdiv_mode) |
2634 		DPLL_CFGCR1_KDIV(wrpll_params.kdiv) |
2635 		DPLL_CFGCR1_PDIV(wrpll_params.pdiv) |
2636 		DPLL_CFGCR1_CENTRAL_FREQ;
2637 
2638 	memset(&crtc_state->dpll_hw_state, 0,
2639 	       sizeof(crtc_state->dpll_hw_state));
2640 
2641 	crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
2642 	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
2643 	return true;
2644 }
2645 
2646 /*
2647  * Display WA #22010492432: ehl, tgl
2648  * Program half of the nominal DCO divider fraction value.
2649  */
2650 static bool
2651 ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
2652 {
2653 	return ((IS_PLATFORM(i915, INTEL_ELKHARTLAKE) &&
2654 		 IS_JSL_EHL_REVID(i915, EHL_REVID_B0, REVID_FOREVER)) ||
2655 		 IS_TIGERLAKE(i915)) &&
2656 		 i915->dpll.ref_clks.nssc == 38400;
2657 }
2658 
/*
 * Compute the port clock in kHz from the WRPLL CFGCR0/CFGCR1 state:
 * decode the P0/P1/P2 dividers and the DCO integer/fraction, then divide
 * the DCO frequency by (5 * P0 * P1 * P2). Returns 0 (with a WARN) if
 * any decoded divider is 0.
 */
static int __cnl_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
				    const struct intel_shared_dpll *pll,
				    const struct intel_dpll_hw_state *pll_state,
				    int ref_clock)
{
	u32 dco_fraction;
	u32 p0, p1, p2, dco_freq;

	p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
	p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;

	/* the Q divider only applies when Q divider mode is enabled */
	if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
		p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
			DPLL_CFGCR1_QDIV_RATIO_SHIFT;
	else
		p1 = 1;


	/* map the P divider register encoding back to its logical value */
	switch (p0) {
	case DPLL_CFGCR1_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR1_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR1_PDIV_5:
		p0 = 5;
		break;
	case DPLL_CFGCR1_PDIV_7:
		p0 = 7;
		break;
	}

	/* map the K divider register encoding back to its logical value */
	switch (p2) {
	case DPLL_CFGCR1_KDIV_1:
		p2 = 1;
		break;
	case DPLL_CFGCR1_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR1_KDIV_3:
		p2 = 3;
		break;
	}

	dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
		   ref_clock;

	dco_fraction = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
		       DPLL_CFGCR0_DCO_FRACTION_SHIFT;

	/* undo the halved fraction programmed for Display WA #22010492432 */
	if (ehl_combo_pll_div_frac_wa_needed(dev_priv))
		dco_fraction *= 2;

	/* fraction is in 1/2^15 units of the reference clock */
	dco_freq += (dco_fraction * ref_clock) / 0x8000;

	if (drm_WARN_ON(&dev_priv->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	return dco_freq / (p0 * p1 * p2 * 5);
}
2720 
2721 static int cnl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
2722 				  const struct intel_shared_dpll *pll,
2723 				  const struct intel_dpll_hw_state *pll_state)
2724 {
2725 	return __cnl_ddi_wrpll_get_freq(i915, pll, pll_state,
2726 					i915->dpll.ref_clks.nssc);
2727 }
2728 
/*
 * Fill crtc_state->dpll_hw_state.cfgcr0 for DP: SSC enable plus the link
 * rate selection matching half the port clock. Note: a port clock not
 * listed below falls through with only the SSC bit set.
 */
static bool
cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
	u32 cfgcr0;

	cfgcr0 = DPLL_CFGCR0_SSC_ENABLE;

	switch (crtc_state->port_clock / 2) {
	case 81000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_810;
		break;
	case 135000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1350;
		break;
	case 270000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2700;
		break;
		/* eDP 1.4 rates */
	case 162000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1620;
		break;
	case 108000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1080;
		break;
	case 216000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2160;
		break;
	case 324000:
		/* Some SKUs may require elevated I/O voltage to support this */
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_3240;
		break;
	case 405000:
		/* Some SKUs may require elevated I/O voltage to support this */
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_4050;
		break;
	}

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;

	return true;
}
2773 
/*
 * Map the link rate selection in cfgcr0 back to the port clock in kHz
 * (link_clock * 2). Warns and returns 0 for an unknown rate encoding.
 */
static int cnl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
{
	int link_clock = 0;

	switch (pll_state->cfgcr0 & DPLL_CFGCR0_LINK_RATE_MASK) {
	case DPLL_CFGCR0_LINK_RATE_810:
		link_clock = 81000;
		break;
	case DPLL_CFGCR0_LINK_RATE_1080:
		link_clock = 108000;
		break;
	case DPLL_CFGCR0_LINK_RATE_1350:
		link_clock = 135000;
		break;
	case DPLL_CFGCR0_LINK_RATE_1620:
		link_clock = 162000;
		break;
	case DPLL_CFGCR0_LINK_RATE_2160:
		link_clock = 216000;
		break;
	case DPLL_CFGCR0_LINK_RATE_2700:
		link_clock = 270000;
		break;
	case DPLL_CFGCR0_LINK_RATE_3240:
		link_clock = 324000;
		break;
	case DPLL_CFGCR0_LINK_RATE_4050:
		link_clock = 405000;
		break;
	default:
		drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
		break;
	}

	return link_clock * 2;
}
2812 
/*
 * Compute the DPLL state for the crtc (HDMI or DP) and find a shared
 * DPLL out of DPLL 0-2 whose state matches, taking a reference on it.
 * Returns false if the state can't be computed or no PLL is available.
 */
static bool cnl_get_dpll(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct intel_shared_dpll *pll;
	bool bret;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
		bret = cnl_ddi_hdmi_pll_dividers(crtc_state);
		if (!bret) {
			drm_dbg_kms(&i915->drm,
				    "Could not get HDMI pll dividers.\n");
			return false;
		}
	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
		bret = cnl_ddi_dp_set_dpll_hw_state(crtc_state);
		if (!bret) {
			drm_dbg_kms(&i915->drm,
				    "Could not set DP dpll HW state.\n");
			return false;
		}
	} else {
		drm_dbg_kms(&i915->drm,
			    "Skip DPLL setup for output_types 0x%x\n",
			    crtc_state->output_types);
		return false;
	}

	pll = intel_find_shared_dpll(state, crtc,
				     &crtc_state->dpll_hw_state,
				     BIT(DPLL_ID_SKL_DPLL2) |
				     BIT(DPLL_ID_SKL_DPLL1) |
				     BIT(DPLL_ID_SKL_DPLL0));
	if (!pll) {
		drm_dbg_kms(&i915->drm, "No PLL selected\n");
		return false;
	}

	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return true;
}
2861 
2862 static int cnl_ddi_pll_get_freq(struct drm_i915_private *i915,
2863 				const struct intel_shared_dpll *pll,
2864 				const struct intel_dpll_hw_state *pll_state)
2865 {
2866 	if (pll_state->cfgcr0 & DPLL_CFGCR0_HDMI_MODE)
2867 		return cnl_ddi_wrpll_get_freq(i915, pll, pll_state);
2868 	else
2869 		return cnl_ddi_lcpll_get_freq(i915, pll, pll_state);
2870 }
2871 
static void cnl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC reference; the non-SSC reference follows the CDCLK ref */
	i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
}
2877 
/* Log the CNL DPLL CFGCR0/CFGCR1 state for debugging. */
static void cnl_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
		    "cfgcr0: 0x%x, cfgcr1: 0x%x\n",
		    hw_state->cfgcr0,
		    hw_state->cfgcr1);
}
2886 
/* PLL hooks shared by all CNL DPLLs. */
static const struct intel_shared_dpll_funcs cnl_ddi_pll_funcs = {
	.enable = cnl_ddi_pll_enable,
	.disable = cnl_ddi_pll_disable,
	.get_hw_state = cnl_ddi_pll_get_hw_state,
	.get_freq = cnl_ddi_pll_get_freq,
};
2893 
/* Three shareable DPLLs; the IDs reuse the SKL DPLL enum values. */
static const struct dpll_info cnl_plls[] = {
	{ "DPLL 0", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
	{ "DPLL 1", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
	{ "DPLL 2", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
	{ },
};
2900 
/* CNL PLL manager: ties the PLL table to the get/put/dump hooks. */
static const struct intel_dpll_mgr cnl_pll_mgr = {
	.dpll_info = cnl_plls,
	.get_dplls = cnl_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = cnl_update_dpll_ref_clks,
	.dump_hw_state = cnl_dump_hw_state,
};
2908 
/* Pre-computed combo PLL parameters for a given port clock (kHz). */
struct icl_combo_pll_params {
	int clock;
	struct skl_wrpll_params wrpll;
};
2913 
2914 /*
 * These values are already adjusted: they're the bits we write to the
2916  * registers, not the logical values.
2917  */
/* ICL DP combo PLL parameters for a 24 MHz reference clock. */
static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
	{ 540000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 270000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 162000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 324000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 216000,
	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
	{ 432000,
	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 648000,
	  { .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 810000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};
2944 
2945 
/* ICL DP combo PLL parameters for a 19.2 MHz reference clock.
 * Also used for 38.4 MHz values. */
static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
	{ 540000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 270000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 162000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 324000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 216000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
	{ 432000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 648000,
	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 810000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};
2973 
/* ICL TBT PLL parameters for a 24 MHz reference clock. */
static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
	.dco_integer = 0x151, .dco_fraction = 0x4000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};

/* ICL TBT PLL parameters for a 19.2 MHz (and 38.4 MHz) reference clock. */
static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};

/* TGL TBT PLL parameters for a 19.2 MHz (and 38.4 MHz) reference clock. */
static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x54, .dco_fraction = 0x3000,
	/* the following params are unused */
	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
};

/* TGL TBT PLL parameters for a 24 MHz reference clock. */
static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
	.dco_integer = 0x43, .dco_fraction = 0x4000,
	/* the following params are unused (implicitly zero-initialized) */
};
2994 
2995 static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2996 				  struct skl_wrpll_params *pll_params)
2997 {
2998 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2999 	const struct icl_combo_pll_params *params =
3000 		dev_priv->dpll.ref_clks.nssc == 24000 ?
3001 		icl_dp_combo_pll_24MHz_values :
3002 		icl_dp_combo_pll_19_2MHz_values;
3003 	int clock = crtc_state->port_clock;
3004 	int i;
3005 
3006 	for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
3007 		if (clock == params[i].clock) {
3008 			*pll_params = params[i].wrpll;
3009 			return true;
3010 		}
3011 	}
3012 
3013 	MISSING_CASE(clock);
3014 	return false;
3015 }
3016 
/*
 * Select the fixed TBT PLL parameters for the current non-SSC reference
 * clock; display ver >= 12 (TGL+) uses its own tables. The 38.4 MHz
 * reference shares the 19.2 MHz values. Always returns true; an unknown
 * reference falls through to the 19.2 MHz values after MISSING_CASE.
 */
static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
			     struct skl_wrpll_params *pll_params)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	if (DISPLAY_VER(dev_priv) >= 12) {
		switch (dev_priv->dpll.ref_clks.nssc) {
		default:
			MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
			fallthrough;
		case 19200:
		case 38400:
			*pll_params = tgl_tbt_pll_19_2MHz_values;
			break;
		case 24000:
			*pll_params = tgl_tbt_pll_24MHz_values;
			break;
		}
	} else {
		switch (dev_priv->dpll.ref_clks.nssc) {
		default:
			MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
			fallthrough;
		case 19200:
		case 38400:
			*pll_params = icl_tbt_pll_19_2MHz_values;
			break;
		case 24000:
			*pll_params = icl_tbt_pll_24MHz_values;
			break;
		}
	}

	return true;
}
3052 
/*
 * Frequency readout is not meaningful for the TBT PLL, so any call here
 * indicates a driver bug.
 */
static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
				    const struct intel_shared_dpll *pll,
				    const struct intel_dpll_hw_state *pll_state)
{
	/*
	 * The PLL outputs multiple frequencies at the same time, selection is
	 * made at DDI clock mux level.
	 */
	drm_WARN_ON(&i915->drm, 1);

	return 0;
}
3065 
3066 static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
3067 {
3068 	int ref_clock = i915->dpll.ref_clks.nssc;
3069 
3070 	/*
3071 	 * For ICL+, the spec states: if reference frequency is 38.4,
3072 	 * use 19.2 because the DPLL automatically divides that by 2.
3073 	 */
3074 	if (ref_clock == 38400)
3075 		ref_clock = 19200;
3076 
3077 	return ref_clock;
3078 }
3079 
3080 static bool
3081 icl_calc_wrpll(struct intel_crtc_state *crtc_state,
3082 	       struct skl_wrpll_params *wrpll_params)
3083 {
3084 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
3085 
3086 	return __cnl_ddi_calculate_wrpll(crtc_state, wrpll_params,
3087 					 icl_wrpll_ref_clock(i915));
3088 }
3089 
/* Read back the combo PHY PLL frequency via the shared CNL WRPLL math. */
static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
				      const struct intel_shared_dpll *pll,
				      const struct intel_dpll_hw_state *pll_state)
{
	int ref_clock = icl_wrpll_ref_clock(i915);

	return __cnl_ddi_wrpll_get_freq(i915, pll, pll_state, ref_clock);
}
3097 
/*
 * Translate computed WRPLL parameters into the CFGCR0/CFGCR1 register
 * values of an ICL+ combo/TBT PLL, starting from a zeroed hw state.
 */
static void icl_calc_dpll_state(struct drm_i915_private *i915,
				const struct skl_wrpll_params *pll_params,
				struct intel_dpll_hw_state *pll_state)
{
	u32 dco_fraction = pll_params->dco_fraction;

	memset(pll_state, 0, sizeof(*pll_state));

	/* EHL/JSL workaround: program half the computed DCO fraction. */
	if (ehl_combo_pll_div_frac_wa_needed(i915))
		dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);

	pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
			    pll_params->dco_integer;

	pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
			    DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
			    DPLL_CFGCR1_KDIV(pll_params->kdiv) |
			    DPLL_CFGCR1_PDIV(pll_params->pdiv);

	/* The clock-select field in CFGCR1 differs between TGL+ and ICL. */
	if (DISPLAY_VER(i915) >= 12)
		pll_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
	else
		pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
}
3122 
/* Map an MG PLL id to its Type-C port; MGPLL1 corresponds to TC port 0. */
static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
{
	return id - DPLL_ID_ICL_MGPLL1;
}
3127 
/* Inverse of icl_pll_id_to_tc_port(): Type-C port to its MG PLL id. */
enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
{
	return tc_port + DPLL_ID_ICL_MGPLL1;
}
3132 
3133 static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
3134 				     u32 *target_dco_khz,
3135 				     struct intel_dpll_hw_state *state,
3136 				     bool is_dkl)
3137 {
3138 	u32 dco_min_freq, dco_max_freq;
3139 	int div1_vals[] = {7, 5, 3, 2};
3140 	unsigned int i;
3141 	int div2;
3142 
3143 	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
3144 	dco_max_freq = is_dp ? 8100000 : 10000000;
3145 
3146 	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
3147 		int div1 = div1_vals[i];
3148 
3149 		for (div2 = 10; div2 > 0; div2--) {
3150 			int dco = div1 * div2 * clock_khz * 5;
3151 			int a_divratio, tlinedrv, inputsel;
3152 			u32 hsdiv;
3153 
3154 			if (dco < dco_min_freq || dco > dco_max_freq)
3155 				continue;
3156 
3157 			if (div2 >= 2) {
3158 				/*
3159 				 * Note: a_divratio not matching TGL BSpec
3160 				 * algorithm but matching hardcoded values and
3161 				 * working on HW for DP alt-mode at least
3162 				 */
3163 				a_divratio = is_dp ? 10 : 5;
3164 				tlinedrv = is_dkl ? 1 : 2;
3165 			} else {
3166 				a_divratio = 5;
3167 				tlinedrv = 0;
3168 			}
3169 			inputsel = is_dp ? 0 : 1;
3170 
3171 			switch (div1) {
3172 			default:
3173 				MISSING_CASE(div1);
3174 				fallthrough;
3175 			case 2:
3176 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
3177 				break;
3178 			case 3:
3179 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
3180 				break;
3181 			case 5:
3182 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
3183 				break;
3184 			case 7:
3185 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
3186 				break;
3187 			}
3188 
3189 			*target_dco_khz = dco;
3190 
3191 			state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
3192 
3193 			state->mg_clktop2_coreclkctl1 =
3194 				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
3195 
3196 			state->mg_clktop2_hsclkctl =
3197 				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
3198 				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
3199 				hsdiv |
3200 				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
3201 
3202 			return true;
3203 		}
3204 	}
3205 
3206 	return false;
3207 }
3208 
3209 /*
3210  * The specification for this function uses real numbers, so the math had to be
3211  * adapted to integer-only calculation, that's why it looks so different.
3212  */
3213 static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
3214 				  struct intel_dpll_hw_state *pll_state)
3215 {
3216 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
3217 	int refclk_khz = dev_priv->dpll.ref_clks.nssc;
3218 	int clock = crtc_state->port_clock;
3219 	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
3220 	u32 iref_ndiv, iref_trim, iref_pulse_w;
3221 	u32 prop_coeff, int_coeff;
3222 	u32 tdc_targetcnt, feedfwgain;
3223 	u64 ssc_stepsize, ssc_steplen, ssc_steplog;
3224 	u64 tmp;
3225 	bool use_ssc = false;
3226 	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
3227 	bool is_dkl = DISPLAY_VER(dev_priv) >= 12;
3228 
3229 	memset(pll_state, 0, sizeof(*pll_state));
3230 
3231 	if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
3232 				      pll_state, is_dkl)) {
3233 		drm_dbg_kms(&dev_priv->drm,
3234 			    "Failed to find divisors for clock %d\n", clock);
3235 		return false;
3236 	}
3237 
3238 	m1div = 2;
3239 	m2div_int = dco_khz / (refclk_khz * m1div);
3240 	if (m2div_int > 255) {
3241 		if (!is_dkl) {
3242 			m1div = 4;
3243 			m2div_int = dco_khz / (refclk_khz * m1div);
3244 		}
3245 
3246 		if (m2div_int > 255) {
3247 			drm_dbg_kms(&dev_priv->drm,
3248 				    "Failed to find mdiv for clock %d\n",
3249 				    clock);
3250 			return false;
3251 		}
3252 	}
3253 	m2div_rem = dco_khz % (refclk_khz * m1div);
3254 
3255 	tmp = (u64)m2div_rem * (1 << 22);
3256 	do_div(tmp, refclk_khz * m1div);
3257 	m2div_frac = tmp;
3258 
3259 	switch (refclk_khz) {
3260 	case 19200:
3261 		iref_ndiv = 1;
3262 		iref_trim = 28;
3263 		iref_pulse_w = 1;
3264 		break;
3265 	case 24000:
3266 		iref_ndiv = 1;
3267 		iref_trim = 25;
3268 		iref_pulse_w = 2;
3269 		break;
3270 	case 38400:
3271 		iref_ndiv = 2;
3272 		iref_trim = 28;
3273 		iref_pulse_w = 1;
3274 		break;
3275 	default:
3276 		MISSING_CASE(refclk_khz);
3277 		return false;
3278 	}
3279 
3280 	/*
3281 	 * tdc_res = 0.000003
3282 	 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
3283 	 *
3284 	 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
3285 	 * was supposed to be a division, but we rearranged the operations of
3286 	 * the formula to avoid early divisions so we don't multiply the
3287 	 * rounding errors.
3288 	 *
3289 	 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
3290 	 * we also rearrange to work with integers.
3291 	 *
3292 	 * The 0.5 transformed to 5 results in a multiplication by 10 and the
3293 	 * last division by 10.
3294 	 */
3295 	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
3296 
3297 	/*
3298 	 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
3299 	 * 32 bits. That's not a problem since we round the division down
3300 	 * anyway.
3301 	 */
3302 	feedfwgain = (use_ssc || m2div_rem > 0) ?
3303 		m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
3304 
3305 	if (dco_khz >= 9000000) {
3306 		prop_coeff = 5;
3307 		int_coeff = 10;
3308 	} else {
3309 		prop_coeff = 4;
3310 		int_coeff = 8;
3311 	}
3312 
3313 	if (use_ssc) {
3314 		tmp = mul_u32_u32(dco_khz, 47 * 32);
3315 		do_div(tmp, refclk_khz * m1div * 10000);
3316 		ssc_stepsize = tmp;
3317 
3318 		tmp = mul_u32_u32(dco_khz, 1000);
3319 		ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
3320 	} else {
3321 		ssc_stepsize = 0;
3322 		ssc_steplen = 0;
3323 	}
3324 	ssc_steplog = 4;
3325 
3326 	/* write pll_state calculations */
3327 	if (is_dkl) {
3328 		pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
3329 					 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
3330 					 DKL_PLL_DIV0_FBPREDIV(m1div) |
3331 					 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
3332 
3333 		pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
3334 					 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
3335 
3336 		pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
3337 					DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
3338 					DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
3339 					(use_ssc ? DKL_PLL_SSC_EN : 0);
3340 
3341 		pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
3342 					  DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
3343 
3344 		pll_state->mg_pll_tdc_coldst_bias =
3345 				DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
3346 				DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
3347 
3348 	} else {
3349 		pll_state->mg_pll_div0 =
3350 			(m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
3351 			MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
3352 			MG_PLL_DIV0_FBDIV_INT(m2div_int);
3353 
3354 		pll_state->mg_pll_div1 =
3355 			MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
3356 			MG_PLL_DIV1_DITHER_DIV_2 |
3357 			MG_PLL_DIV1_NDIVRATIO(1) |
3358 			MG_PLL_DIV1_FBPREDIV(m1div);
3359 
3360 		pll_state->mg_pll_lf =
3361 			MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
3362 			MG_PLL_LF_AFCCNTSEL_512 |
3363 			MG_PLL_LF_GAINCTRL(1) |
3364 			MG_PLL_LF_INT_COEFF(int_coeff) |
3365 			MG_PLL_LF_PROP_COEFF(prop_coeff);
3366 
3367 		pll_state->mg_pll_frac_lock =
3368 			MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
3369 			MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
3370 			MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
3371 			MG_PLL_FRAC_LOCK_DCODITHEREN |
3372 			MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
3373 		if (use_ssc || m2div_rem > 0)
3374 			pll_state->mg_pll_frac_lock |=
3375 				MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
3376 
3377 		pll_state->mg_pll_ssc =
3378 			(use_ssc ? MG_PLL_SSC_EN : 0) |
3379 			MG_PLL_SSC_TYPE(2) |
3380 			MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
3381 			MG_PLL_SSC_STEPNUM(ssc_steplog) |
3382 			MG_PLL_SSC_FLLEN |
3383 			MG_PLL_SSC_STEPSIZE(ssc_stepsize);
3384 
3385 		pll_state->mg_pll_tdc_coldst_bias =
3386 			MG_PLL_TDC_COLDST_COLDSTART |
3387 			MG_PLL_TDC_COLDST_IREFINT_EN |
3388 			MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
3389 			MG_PLL_TDC_TDCOVCCORR_EN |
3390 			MG_PLL_TDC_TDCSEL(3);
3391 
3392 		pll_state->mg_pll_bias =
3393 			MG_PLL_BIAS_BIAS_GB_SEL(3) |
3394 			MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
3395 			MG_PLL_BIAS_BIAS_BONUS(10) |
3396 			MG_PLL_BIAS_BIASCAL_EN |
3397 			MG_PLL_BIAS_CTRIM(12) |
3398 			MG_PLL_BIAS_VREF_RDAC(4) |
3399 			MG_PLL_BIAS_IREFTRIM(iref_trim);
3400 
3401 		if (refclk_khz == 38400) {
3402 			pll_state->mg_pll_tdc_coldst_bias_mask =
3403 				MG_PLL_TDC_COLDST_COLDSTART;
3404 			pll_state->mg_pll_bias_mask = 0;
3405 		} else {
3406 			pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
3407 			pll_state->mg_pll_bias_mask = -1U;
3408 		}
3409 
3410 		pll_state->mg_pll_tdc_coldst_bias &=
3411 			pll_state->mg_pll_tdc_coldst_bias_mask;
3412 		pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
3413 	}
3414 
3415 	return true;
3416 }
3417 
/*
 * Compute the port clock (kHz) produced by an MG/DKL PHY PLL from its
 * register state: refclk * m1 * (m2_int + m2_frac/2^22) / (5 * div1 * div2).
 * Returns 0 on an unexpected HSDIV field value.
 */
static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv,
				   const struct intel_shared_dpll *pll,
				   const struct intel_dpll_hw_state *pll_state)
{
	u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
	u64 tmp;

	ref_clock = dev_priv->dpll.ref_clks.nssc;

	/* Feedback divider fields live in different registers on DKL vs MG. */
	if (DISPLAY_VER(dev_priv) >= 12) {
		m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
		m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
		m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;

		if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
			m2_frac = pll_state->mg_pll_bias &
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK;
			m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
		} else {
			m2_frac = 0;
		}
	} else {
		m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
		m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;

		if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
			m2_frac = pll_state->mg_pll_div0 &
				  MG_PLL_DIV0_FBDIV_FRAC_MASK;
			m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
		} else {
			m2_frac = 0;
		}
	}

	switch (pll_state->mg_clktop2_hsclkctl &
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
		div1 = 2;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
		div1 = 3;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
		div1 = 5;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
		div1 = 7;
		break;
	default:
		MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
		return 0;
	}

	div2 = (pll_state->mg_clktop2_hsclkctl &
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;

	/* div2 value of 0 is same as 1 means no div */
	if (div2 == 0)
		div2 = 1;

	/*
	 * Adjust the original formula to delay the division by 2^22 in order to
	 * minimize possible rounding errors.
	 */
	tmp = (u64)m1 * m2_int * ref_clock +
	      (((u64)m1 * m2_frac * ref_clock) >> 22);
	tmp = div_u64(tmp, 5 * div1 * div2);

	return tmp;
}
3489 
3490 /**
3491  * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
3492  * @crtc_state: state for the CRTC to select the DPLL for
3493  * @port_dpll_id: the active @port_dpll_id to select
3494  *
3495  * Select the given @port_dpll_id instance from the DPLLs reserved for the
3496  * CRTC.
3497  */
3498 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
3499 			      enum icl_port_dpll_id port_dpll_id)
3500 {
3501 	struct icl_port_dpll *port_dpll =
3502 		&crtc_state->icl_port_dplls[port_dpll_id];
3503 
3504 	crtc_state->shared_dpll = port_dpll->pll;
3505 	crtc_state->dpll_hw_state = port_dpll->hw_state;
3506 }
3507 
3508 static void icl_update_active_dpll(struct intel_atomic_state *state,
3509 				   struct intel_crtc *crtc,
3510 				   struct intel_encoder *encoder)
3511 {
3512 	struct intel_crtc_state *crtc_state =
3513 		intel_atomic_get_new_crtc_state(state, crtc);
3514 	struct intel_digital_port *primary_port;
3515 	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3516 
3517 	primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3518 		enc_to_mst(encoder)->primary :
3519 		enc_to_dig_port(encoder);
3520 
3521 	if (primary_port &&
3522 	    (primary_port->tc_mode == TC_PORT_DP_ALT ||
3523 	     primary_port->tc_mode == TC_PORT_LEGACY))
3524 		port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3525 
3526 	icl_set_active_port_dpll(crtc_state, port_dpll_id);
3527 }
3528 
3529 static u32 intel_get_hti_plls(struct drm_i915_private *i915)
3530 {
3531 	if (!(i915->hti_state & HDPORT_ENABLED))
3532 		return 0;
3533 
3534 	return REG_FIELD_GET(HDPORT_DPLL_USED_MASK, i915->hti_state);
3535 }
3536 
/*
 * Compute the combo PHY PLL state for the CRTC and reserve a matching
 * shared DPLL from the platform-specific candidate pool. Returns false if
 * no state could be calculated or no PLL is available.
 */
static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct skl_wrpll_params pll_params = { };
	struct icl_port_dpll *port_dpll =
		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum port port = encoder->port;
	unsigned long dpll_mask;
	int ret;

	/* HDMI/DSI need a computed WRPLL; DP uses pre-computed tables. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
		ret = icl_calc_wrpll(crtc_state, &pll_params);
	else
		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);

	if (!ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "Could not calculate combo PHY PLL state.\n");

		return false;
	}

	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);

	/* Candidate PLL pool depends on the platform (and port for DG1/EHL). */
	if (IS_ALDERLAKE_S(dev_priv)) {
		dpll_mask =
			BIT(DPLL_ID_DG1_DPLL3) |
			BIT(DPLL_ID_DG1_DPLL2) |
			BIT(DPLL_ID_ICL_DPLL1) |
			BIT(DPLL_ID_ICL_DPLL0);
	} else if (IS_DG1(dev_priv)) {
		if (port == PORT_D || port == PORT_E) {
			dpll_mask =
				BIT(DPLL_ID_DG1_DPLL2) |
				BIT(DPLL_ID_DG1_DPLL3);
		} else {
			dpll_mask =
				BIT(DPLL_ID_DG1_DPLL0) |
				BIT(DPLL_ID_DG1_DPLL1);
		}
	} else if (IS_ROCKETLAKE(dev_priv)) {
		dpll_mask =
			BIT(DPLL_ID_EHL_DPLL4) |
			BIT(DPLL_ID_ICL_DPLL1) |
			BIT(DPLL_ID_ICL_DPLL0);
	} else if (IS_JSL_EHL(dev_priv) && port != PORT_A) {
		dpll_mask =
			BIT(DPLL_ID_EHL_DPLL4) |
			BIT(DPLL_ID_ICL_DPLL1) |
			BIT(DPLL_ID_ICL_DPLL0);
	} else {
		dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
	}

	/* Eliminate DPLLs from consideration if reserved by HTI */
	dpll_mask &= ~intel_get_hti_plls(dev_priv);

	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						dpll_mask);
	if (!port_dpll->pll) {
		drm_dbg_kms(&dev_priv->drm,
			    "No combo PHY PLL found for [ENCODER:%d:%s]\n",
			    encoder->base.base.id, encoder->base.name);
		return false;
	}

	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);

	icl_update_active_dpll(state, crtc, encoder);

	return true;
}
3616 
/*
 * Reserve both PLLs a Type-C port may need: the TBT PLL (default) and the
 * port-specific MG PHY PLL. Both are referenced so the active one can be
 * switched later; on MG failure the already-taken TBT reference is dropped.
 */
static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
				 struct intel_crtc *crtc,
				 struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct skl_wrpll_params pll_params = { };
	struct icl_port_dpll *port_dpll;
	enum intel_dpll_id dpll_id;

	/* First the TBT PLL, shared by all Type-C ports. */
	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	if (!icl_calc_tbt_pll(crtc_state, &pll_params)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Could not calculate TBT PLL state.\n");
		return false;
	}

	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);

	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						BIT(DPLL_ID_ICL_TBTPLL));
	if (!port_dpll->pll) {
		drm_dbg_kms(&dev_priv->drm, "No TBT-ALT PLL found\n");
		return false;
	}
	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);


	/* Then the MG PHY PLL dedicated to this port's Type-C PHY. */
	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
	if (!icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Could not calculate MG PHY PLL state.\n");
		goto err_unreference_tbt_pll;
	}

	dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
							 encoder->port));
	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						BIT(dpll_id));
	if (!port_dpll->pll) {
		drm_dbg_kms(&dev_priv->drm, "No MG PHY PLL found\n");
		goto err_unreference_tbt_pll;
	}
	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);

	icl_update_active_dpll(state, crtc, encoder);

	return true;

err_unreference_tbt_pll:
	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	intel_unreference_shared_dpll(state, crtc, port_dpll->pll);

	return false;
}
3677 
3678 static bool icl_get_dplls(struct intel_atomic_state *state,
3679 			  struct intel_crtc *crtc,
3680 			  struct intel_encoder *encoder)
3681 {
3682 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3683 	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3684 
3685 	if (intel_phy_is_combo(dev_priv, phy))
3686 		return icl_get_combo_phy_dpll(state, crtc, encoder);
3687 	else if (intel_phy_is_tc(dev_priv, phy))
3688 		return icl_get_tc_phy_dplls(state, crtc, encoder);
3689 
3690 	MISSING_CASE(phy);
3691 
3692 	return false;
3693 }
3694 
/*
 * Release every port DPLL the CRTC held in its old state and clear the
 * references in the new state.
 */
static void icl_put_dplls(struct intel_atomic_state *state,
			  struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum icl_port_dpll_id id;

	new_crtc_state->shared_dpll = NULL;

	for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
		const struct icl_port_dpll *old_port_dpll =
			&old_crtc_state->icl_port_dplls[id];
		struct icl_port_dpll *new_port_dpll =
			&new_crtc_state->icl_port_dplls[id];

		new_port_dpll->pll = NULL;

		/* A slot may legitimately be empty (e.g. combo PHY CRTCs). */
		if (!old_port_dpll->pll)
			continue;

		intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
	}
}
3720 
/*
 * Read out the register state of an ICL MG PHY PLL. Reserved fields are
 * masked off so readout matches what icl_calc_mg_pll_state() computes.
 * Returns false if display power or the PLL itself is disabled.
 */
static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll,
				struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, MG_PLL_ENABLE(tc_port));
	if (!(val & PLL_ENABLE))
		goto out;

	hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
						  MG_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_pll_div0 = intel_de_read(dev_priv, MG_PLL_DIV0(tc_port));
	hw_state->mg_pll_div1 = intel_de_read(dev_priv, MG_PLL_DIV1(tc_port));
	hw_state->mg_pll_lf = intel_de_read(dev_priv, MG_PLL_LF(tc_port));
	hw_state->mg_pll_frac_lock = intel_de_read(dev_priv,
						   MG_PLL_FRAC_LOCK(tc_port));
	hw_state->mg_pll_ssc = intel_de_read(dev_priv, MG_PLL_SSC(tc_port));

	hw_state->mg_pll_bias = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias =
		intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));

	/* Mirror the refclk-dependent masks used by icl_calc_mg_pll_state(). */
	if (dev_priv->dpll.ref_clks.nssc == 38400) {
		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
		hw_state->mg_pll_bias_mask = 0;
	} else {
		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
		hw_state->mg_pll_bias_mask = -1U;
	}

	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3784 
/*
 * Read out the register state of a TGL+ Dekel PHY PLL, masking every
 * register down to the fields icl_calc_mg_pll_state() programs so state
 * comparison works. Returns false if power or the PLL is disabled.
 */
static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, MG_PLL_ENABLE(tc_port));
	if (!(val & PLL_ENABLE))
		goto out;

	/*
	 * All registers read here have the same HIP_INDEX_REG even though
	 * they are on different building blocks
	 */
	intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
		       HIP_INDEX_VAL(tc_port, 0x2));

	hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
						  DKL_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_pll_div0 = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
	hw_state->mg_pll_div0 &= (DKL_PLL_DIV0_INTEG_COEFF_MASK |
				  DKL_PLL_DIV0_PROP_COEFF_MASK |
				  DKL_PLL_DIV0_FBPREDIV_MASK |
				  DKL_PLL_DIV0_FBDIV_INT_MASK);

	hw_state->mg_pll_div1 = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
	hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
				  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);

	hw_state->mg_pll_ssc = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
	hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
				 DKL_PLL_SSC_STEP_LEN_MASK |
				 DKL_PLL_SSC_STEP_NUM_MASK |
				 DKL_PLL_SSC_EN);

	hw_state->mg_pll_bias = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
	hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK);

	hw_state->mg_pll_tdc_coldst_bias =
		intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
					     DKL_PLL_TDC_FEED_FWD_GAIN_MASK);

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3858 
/*
 * Common readout for combo/TBT PLLs: check @enable_reg, then read
 * CFGCR0/CFGCR1 from the platform-specific register locations.
 * Returns false if display power or the PLL is disabled.
 */
static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state,
				 i915_reg_t enable_reg)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	/* CFGCR register offsets differ per platform. */
	if (IS_ALDERLAKE_S(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR1(id));
	} else if (IS_DG1(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv, DG1_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv, DG1_DPLL_CFGCR1(id));
	} else if (IS_ROCKETLAKE(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv,
						 RKL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 RKL_DPLL_CFGCR1(id));
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		hw_state->cfgcr0 = intel_de_read(dev_priv,
						 TGL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 TGL_DPLL_CFGCR1(id));
	} else {
		/* On JSL/EHL, DPLL4 reuses the register slot of index 4. */
		if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
			hw_state->cfgcr0 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR0(4));
			hw_state->cfgcr1 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR1(4));
		} else {
			hw_state->cfgcr0 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR0(id));
			hw_state->cfgcr1 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR1(id));
		}
	}

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3913 
3914 static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
3915 				   struct intel_shared_dpll *pll,
3916 				   struct intel_dpll_hw_state *hw_state)
3917 {
3918 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
3919 
3920 	return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg);
3921 }
3922 
3923 static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
3924 				 struct intel_shared_dpll *pll,
3925 				 struct intel_dpll_hw_state *hw_state)
3926 {
3927 	return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
3928 }
3929 
/*
 * Program the PLL's CFGCR0/CFGCR1 from the cached hw state, resolving the
 * platform-specific register locations (mirror of icl_pll_get_hw_state()).
 */
static void icl_dpll_write(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	const enum intel_dpll_id id = pll->info->id;
	i915_reg_t cfgcr0_reg, cfgcr1_reg;

	if (IS_ALDERLAKE_S(dev_priv)) {
		cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
		cfgcr1_reg = ADLS_DPLL_CFGCR1(id);
	} else if (IS_DG1(dev_priv)) {
		cfgcr0_reg = DG1_DPLL_CFGCR0(id);
		cfgcr1_reg = DG1_DPLL_CFGCR1(id);
	} else if (IS_ROCKETLAKE(dev_priv)) {
		cfgcr0_reg = RKL_DPLL_CFGCR0(id);
		cfgcr1_reg = RKL_DPLL_CFGCR1(id);
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		cfgcr0_reg = TGL_DPLL_CFGCR0(id);
		cfgcr1_reg = TGL_DPLL_CFGCR1(id);
	} else {
		/* On JSL/EHL, DPLL4 reuses the register slot of index 4. */
		if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
			cfgcr0_reg = ICL_DPLL_CFGCR0(4);
			cfgcr1_reg = ICL_DPLL_CFGCR1(4);
		} else {
			cfgcr0_reg = ICL_DPLL_CFGCR0(id);
			cfgcr1_reg = ICL_DPLL_CFGCR1(id);
		}
	}

	intel_de_write(dev_priv, cfgcr0_reg, hw_state->cfgcr0);
	intel_de_write(dev_priv, cfgcr1_reg, hw_state->cfgcr1);
	/* Posting read to flush the writes before the PLL is enabled. */
	intel_de_posting_read(dev_priv, cfgcr1_reg);
}
3963 
/*
 * Program all MG PHY PLL registers from the cached hw state computed by
 * icl_calc_mg_pll_state().
 */
static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
	u32 val;

	/*
	 * Some of the following registers have reserved fields, so program
	 * these with RMW based on a mask. The mask can be fixed or generated
	 * during the calc/readout phase if the mask depends on some other HW
	 * state like refclk, see icl_calc_mg_pll_state().
	 */
	val = intel_de_read(dev_priv, MG_REFCLKIN_CTL(tc_port));
	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
	val |= hw_state->mg_refclkin_ctl;
	intel_de_write(dev_priv, MG_REFCLKIN_CTL(tc_port), val);

	val = intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
	val |= hw_state->mg_clktop2_coreclkctl1;
	intel_de_write(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port), val);

	val = intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
	val |= hw_state->mg_clktop2_hsclkctl;
	intel_de_write(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port), val);

	/* These registers are fully owned by the driver: plain writes. */
	intel_de_write(dev_priv, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
	intel_de_write(dev_priv, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
	intel_de_write(dev_priv, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
	intel_de_write(dev_priv, MG_PLL_FRAC_LOCK(tc_port),
		       hw_state->mg_pll_frac_lock);
	intel_de_write(dev_priv, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);

	val = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
	val &= ~hw_state->mg_pll_bias_mask;
	val |= hw_state->mg_pll_bias;
	intel_de_write(dev_priv, MG_PLL_BIAS(tc_port), val);

	val = intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
	val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
	val |= hw_state->mg_pll_tdc_coldst_bias;
	intel_de_write(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port), val);

	/* Posting read to flush all writes before the PLL is enabled. */
	intel_de_posting_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
}
4014 
/*
 * Program a Dekel PHY (Type-C, display 12+) PLL from the cached hw_state.
 * The Dekel registers are accessed through an index register selecting
 * the PHY-internal register bank; every access below is RMW.
 */
static void dkl_pll_write(struct drm_i915_private *dev_priv,
			  struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
	u32 val;

	/*
	 * All registers programmed here have the same HIP_INDEX_REG even
	 * though on different building block
	 */
	intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
		       HIP_INDEX_VAL(tc_port, 0x2));

	/* All the registers are RMW */
	val = intel_de_read(dev_priv, DKL_REFCLKIN_CTL(tc_port));
	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
	val |= hw_state->mg_refclkin_ctl;
	intel_de_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), val);

	val = intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
	val |= hw_state->mg_clktop2_coreclkctl1;
	intel_de_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);

	val = intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
	val |= hw_state->mg_clktop2_hsclkctl;
	intel_de_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
	val &= ~(DKL_PLL_DIV0_INTEG_COEFF_MASK |
		 DKL_PLL_DIV0_PROP_COEFF_MASK |
		 DKL_PLL_DIV0_FBPREDIV_MASK |
		 DKL_PLL_DIV0_FBDIV_INT_MASK);
	val |= hw_state->mg_pll_div0;
	intel_de_write(dev_priv, DKL_PLL_DIV0(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
	val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
		 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
	val |= hw_state->mg_pll_div1;
	intel_de_write(dev_priv, DKL_PLL_DIV1(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
	val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
		 DKL_PLL_SSC_STEP_LEN_MASK |
		 DKL_PLL_SSC_STEP_NUM_MASK |
		 DKL_PLL_SSC_EN);
	val |= hw_state->mg_pll_ssc;
	intel_de_write(dev_priv, DKL_PLL_SSC(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
	val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
		 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
	val |= hw_state->mg_pll_bias;
	intel_de_write(dev_priv, DKL_PLL_BIAS(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
		 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
	val |= hw_state->mg_pll_tdc_coldst_bias;
	intel_de_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);

	/* Posting read to flush the programming before enabling the PLL. */
	intel_de_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
}
4084 
4085 static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
4086 				 struct intel_shared_dpll *pll,
4087 				 i915_reg_t enable_reg)
4088 {
4089 	u32 val;
4090 
4091 	val = intel_de_read(dev_priv, enable_reg);
4092 	val |= PLL_POWER_ENABLE;
4093 	intel_de_write(dev_priv, enable_reg, val);
4094 
4095 	/*
4096 	 * The spec says we need to "wait" but it also says it should be
4097 	 * immediate.
4098 	 */
4099 	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1))
4100 		drm_err(&dev_priv->drm, "PLL %d Power not enabled\n",
4101 			pll->info->id);
4102 }
4103 
4104 static void icl_pll_enable(struct drm_i915_private *dev_priv,
4105 			   struct intel_shared_dpll *pll,
4106 			   i915_reg_t enable_reg)
4107 {
4108 	u32 val;
4109 
4110 	val = intel_de_read(dev_priv, enable_reg);
4111 	val |= PLL_ENABLE;
4112 	intel_de_write(dev_priv, enable_reg, val);
4113 
4114 	/* Timeout is actually 600us. */
4115 	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
4116 		drm_err(&dev_priv->drm, "PLL %d not locked\n", pll->info->id);
4117 }
4118 
/*
 * Enable a combo PHY PLL: power it up, program the dividers, then enable.
 * On JSL/EHL DPLL4 additionally needs DC states blocked for as long as it
 * is enabled; the wakeref taken here is released in combo_pll_disable().
 */
static void combo_pll_enable(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);

	if (IS_JSL_EHL(dev_priv) &&
	    pll->info->id == DPLL_ID_EHL_DPLL4) {

		/*
		 * We need to disable DC states when this DPLL is enabled.
		 * This can be done by taking a reference on DPLL4 power
		 * domain.
		 */
		pll->wakeref = intel_display_power_get(dev_priv,
						       POWER_DOMAIN_DPLL_DC_OFF);
	}

	icl_pll_power_enable(dev_priv, pll, enable_reg);

	icl_dpll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, enable_reg);

	/* DVFS post sequence would be here. See the comment above. */
}
4150 
4151 static void tbt_pll_enable(struct drm_i915_private *dev_priv,
4152 			   struct intel_shared_dpll *pll)
4153 {
4154 	icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);
4155 
4156 	icl_dpll_write(dev_priv, pll);
4157 
4158 	/*
4159 	 * DVFS pre sequence would be here, but in our driver the cdclk code
4160 	 * paths should already be setting the appropriate voltage, hence we do
4161 	 * nothing here.
4162 	 */
4163 
4164 	icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);
4165 
4166 	/* DVFS post sequence would be here. See the comment above. */
4167 }
4168 
4169 static void mg_pll_enable(struct drm_i915_private *dev_priv,
4170 			  struct intel_shared_dpll *pll)
4171 {
4172 	i915_reg_t enable_reg =
4173 		MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
4174 
4175 	icl_pll_power_enable(dev_priv, pll, enable_reg);
4176 
4177 	if (DISPLAY_VER(dev_priv) >= 12)
4178 		dkl_pll_write(dev_priv, pll);
4179 	else
4180 		icl_mg_pll_write(dev_priv, pll);
4181 
4182 	/*
4183 	 * DVFS pre sequence would be here, but in our driver the cdclk code
4184 	 * paths should already be setting the appropriate voltage, hence we do
4185 	 * nothing here.
4186 	 */
4187 
4188 	icl_pll_enable(dev_priv, pll, enable_reg);
4189 
4190 	/* DVFS post sequence would be here. See the comment above. */
4191 }
4192 
/*
 * Disable a PLL in the reverse order of the enable sequence: clear the
 * enable bit and wait for unlock, then power the PLL down and wait for
 * the power state to clear.
 */
static void icl_pll_disable(struct drm_i915_private *dev_priv,
			    struct intel_shared_dpll *pll,
			    i915_reg_t enable_reg)
{
	u32 val;

	/* The first steps are done by intel_ddi_post_disable(). */

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	val = intel_de_read(dev_priv, enable_reg);
	val &= ~PLL_ENABLE;
	intel_de_write(dev_priv, enable_reg, val);

	/* Timeout is actually 1us. */
	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "PLL %d locked\n", pll->info->id);

	/* DVFS post sequence would be here. See the comment above. */

	val = intel_de_read(dev_priv, enable_reg);
	val &= ~PLL_POWER_ENABLE;
	intel_de_write(dev_priv, enable_reg, val);

	/*
	 * The spec says we need to "wait" but it also says it should be
	 * immediate.
	 */
	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1))
		drm_err(&dev_priv->drm, "PLL %d Power not disabled\n",
			pll->info->id);
}
4229 
4230 static void combo_pll_disable(struct drm_i915_private *dev_priv,
4231 			      struct intel_shared_dpll *pll)
4232 {
4233 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
4234 
4235 	icl_pll_disable(dev_priv, pll, enable_reg);
4236 
4237 	if (IS_JSL_EHL(dev_priv) &&
4238 	    pll->info->id == DPLL_ID_EHL_DPLL4)
4239 		intel_display_power_put(dev_priv, POWER_DOMAIN_DPLL_DC_OFF,
4240 					pll->wakeref);
4241 }
4242 
4243 static void tbt_pll_disable(struct drm_i915_private *dev_priv,
4244 			    struct intel_shared_dpll *pll)
4245 {
4246 	icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
4247 }
4248 
4249 static void mg_pll_disable(struct drm_i915_private *dev_priv,
4250 			   struct intel_shared_dpll *pll)
4251 {
4252 	i915_reg_t enable_reg =
4253 		MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
4254 
4255 	icl_pll_disable(dev_priv, pll, enable_reg);
4256 }
4257 
/* Refresh the cached non-SSC DPLL reference clock from the cdclk state. */
static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC ref */
	i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
}
4263 
4264 static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
4265 			      const struct intel_dpll_hw_state *hw_state)
4266 {
4267 	drm_dbg_kms(&dev_priv->drm,
4268 		    "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, "
4269 		    "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
4270 		    "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
4271 		    "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
4272 		    "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
4273 		    "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
4274 		    hw_state->cfgcr0, hw_state->cfgcr1,
4275 		    hw_state->mg_refclkin_ctl,
4276 		    hw_state->mg_clktop2_coreclkctl1,
4277 		    hw_state->mg_clktop2_hsclkctl,
4278 		    hw_state->mg_pll_div0,
4279 		    hw_state->mg_pll_div1,
4280 		    hw_state->mg_pll_lf,
4281 		    hw_state->mg_pll_frac_lock,
4282 		    hw_state->mg_pll_ssc,
4283 		    hw_state->mg_pll_bias,
4284 		    hw_state->mg_pll_tdc_coldst_bias);
4285 }
4286 
/* PLL callbacks for ICL+ combo PHY DPLLs. */
static const struct intel_shared_dpll_funcs combo_pll_funcs = {
	.enable = combo_pll_enable,
	.disable = combo_pll_disable,
	.get_hw_state = combo_pll_get_hw_state,
	.get_freq = icl_ddi_combo_pll_get_freq,
};

/* PLL callbacks for the ICL+ Thunderbolt PLL. */
static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
	.enable = tbt_pll_enable,
	.disable = tbt_pll_disable,
	.get_hw_state = tbt_pll_get_hw_state,
	.get_freq = icl_ddi_tbt_pll_get_freq,
};

/* PLL callbacks for ICL MG PHY (Type-C) DPLLs. */
static const struct intel_shared_dpll_funcs mg_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = mg_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};

/* PLLs available on ICL: two combo, one TBT, four MG Type-C PLLs. */
static const struct dpll_info icl_plls[] = {
	{ "DPLL 0",   &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1",   &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ },
};

static const struct intel_dpll_mgr icl_pll_mgr = {
	.dpll_info = icl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};

/* PLLs available on JSL/EHL: three combo PLLs, no Type-C/TBT. */
static const struct dpll_info ehl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
	{ },
};

static const struct intel_dpll_mgr ehl_pll_mgr = {
	.dpll_info = ehl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};

/* PLL callbacks for Dekel PHY (Type-C, display 12+) DPLLs. */
static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = dkl_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};

/* PLLs available on TGL: two combo, one TBT, six Dekel Type-C PLLs. */
static const struct dpll_info tgl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ "TC PLL 5", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL5, 0 },
	{ "TC PLL 6", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL6, 0 },
	{ },
};

static const struct intel_dpll_mgr tgl_pll_mgr = {
	.dpll_info = tgl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};

/* PLLs available on RKL: three combo PLLs. */
static const struct dpll_info rkl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
	{ },
};

static const struct intel_dpll_mgr rkl_pll_mgr = {
	.dpll_info = rkl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};

/* PLLs available on DG1: four combo PLLs. */
static const struct dpll_info dg1_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_DG1_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_DG1_DPLL1, 0 },
	{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
	{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
	{ },
};

static const struct intel_dpll_mgr dg1_pll_mgr = {
	.dpll_info = dg1_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};

/* PLLs available on ADL-S: four combo PLLs (IDs reuse ICL/DG1 values). */
static const struct dpll_info adls_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
	{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
	{ },
};

static const struct intel_dpll_mgr adls_pll_mgr = {
	.dpll_info = adls_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4418 
4419 /**
4420  * intel_shared_dpll_init - Initialize shared DPLLs
4421  * @dev: drm device
4422  *
4423  * Initialize shared DPLLs for @dev.
4424  */
4425 void intel_shared_dpll_init(struct drm_device *dev)
4426 {
4427 	struct drm_i915_private *dev_priv = to_i915(dev);
4428 	const struct intel_dpll_mgr *dpll_mgr = NULL;
4429 	const struct dpll_info *dpll_info;
4430 	int i;
4431 
4432 	if (IS_ALDERLAKE_S(dev_priv))
4433 		dpll_mgr = &adls_pll_mgr;
4434 	else if (IS_DG1(dev_priv))
4435 		dpll_mgr = &dg1_pll_mgr;
4436 	else if (IS_ROCKETLAKE(dev_priv))
4437 		dpll_mgr = &rkl_pll_mgr;
4438 	else if (DISPLAY_VER(dev_priv) >= 12)
4439 		dpll_mgr = &tgl_pll_mgr;
4440 	else if (IS_JSL_EHL(dev_priv))
4441 		dpll_mgr = &ehl_pll_mgr;
4442 	else if (DISPLAY_VER(dev_priv) >= 11)
4443 		dpll_mgr = &icl_pll_mgr;
4444 	else if (IS_CANNONLAKE(dev_priv))
4445 		dpll_mgr = &cnl_pll_mgr;
4446 	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
4447 		dpll_mgr = &bxt_pll_mgr;
4448 	else if (DISPLAY_VER(dev_priv) == 9)
4449 		dpll_mgr = &skl_pll_mgr;
4450 	else if (HAS_DDI(dev_priv))
4451 		dpll_mgr = &hsw_pll_mgr;
4452 	else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
4453 		dpll_mgr = &pch_pll_mgr;
4454 
4455 	if (!dpll_mgr) {
4456 		dev_priv->dpll.num_shared_dpll = 0;
4457 		return;
4458 	}
4459 
4460 	dpll_info = dpll_mgr->dpll_info;
4461 
4462 	for (i = 0; dpll_info[i].name; i++) {
4463 		drm_WARN_ON(dev, i != dpll_info[i].id);
4464 		dev_priv->dpll.shared_dplls[i].info = &dpll_info[i];
4465 	}
4466 
4467 	dev_priv->dpll.mgr = dpll_mgr;
4468 	dev_priv->dpll.num_shared_dpll = i;
4469 	mutex_init(&dev_priv->dpll.lock);
4470 
4471 	BUG_ON(dev_priv->dpll.num_shared_dpll > I915_NUM_PLLS);
4472 }
4473 
4474 /**
4475  * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
4476  * @state: atomic state
4477  * @crtc: CRTC to reserve DPLLs for
4478  * @encoder: encoder
4479  *
4480  * This function reserves all required DPLLs for the given CRTC and encoder
4481  * combination in the current atomic commit @state and the new @crtc atomic
4482  * state.
4483  *
4484  * The new configuration in the atomic commit @state is made effective by
4485  * calling intel_shared_dpll_swap_state().
4486  *
4487  * The reserved DPLLs should be released by calling
4488  * intel_release_shared_dplls().
4489  *
4490  * Returns:
4491  * True if all required DPLLs were successfully reserved.
4492  */
4493 bool intel_reserve_shared_dplls(struct intel_atomic_state *state,
4494 				struct intel_crtc *crtc,
4495 				struct intel_encoder *encoder)
4496 {
4497 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4498 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4499 
4500 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4501 		return false;
4502 
4503 	return dpll_mgr->get_dplls(state, crtc, encoder);
4504 }
4505 
4506 /**
4507  * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
4508  * @state: atomic state
4509  * @crtc: crtc from which the DPLLs are to be released
4510  *
4511  * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
4512  * from the current atomic commit @state and the old @crtc atomic state.
4513  *
4514  * The new configuration in the atomic commit @state is made effective by
4515  * calling intel_shared_dpll_swap_state().
4516  */
4517 void intel_release_shared_dplls(struct intel_atomic_state *state,
4518 				struct intel_crtc *crtc)
4519 {
4520 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4521 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4522 
4523 	/*
4524 	 * FIXME: this function is called for every platform having a
4525 	 * compute_clock hook, even though the platform doesn't yet support
4526 	 * the shared DPLL framework and intel_reserve_shared_dplls() is not
4527 	 * called on those.
4528 	 */
4529 	if (!dpll_mgr)
4530 		return;
4531 
4532 	dpll_mgr->put_dplls(state, crtc);
4533 }
4534 
4535 /**
4536  * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
4537  * @state: atomic state
4538  * @crtc: the CRTC for which to update the active DPLL
4539  * @encoder: encoder determining the type of port DPLL
4540  *
4541  * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
4542  * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
4543  * DPLL selected will be based on the current mode of the encoder's port.
4544  */
4545 void intel_update_active_dpll(struct intel_atomic_state *state,
4546 			      struct intel_crtc *crtc,
4547 			      struct intel_encoder *encoder)
4548 {
4549 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4550 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4551 
4552 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4553 		return;
4554 
4555 	dpll_mgr->update_active_dpll(state, crtc, encoder);
4556 }
4557 
4558 /**
4559  * intel_dpll_get_freq - calculate the DPLL's output frequency
4560  * @i915: i915 device
4561  * @pll: DPLL for which to calculate the output frequency
4562  * @pll_state: DPLL state from which to calculate the output frequency
4563  *
4564  * Return the output frequency corresponding to @pll's passed in @pll_state.
4565  */
4566 int intel_dpll_get_freq(struct drm_i915_private *i915,
4567 			const struct intel_shared_dpll *pll,
4568 			const struct intel_dpll_hw_state *pll_state)
4569 {
4570 	if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
4571 		return 0;
4572 
4573 	return pll->info->funcs->get_freq(i915, pll, pll_state);
4574 }
4575 
4576 /**
4577  * intel_dpll_get_hw_state - readout the DPLL's hardware state
4578  * @i915: i915 device
4579  * @pll: DPLL for which to calculate the output frequency
4580  * @hw_state: DPLL's hardware state
4581  *
4582  * Read out @pll's hardware state into @hw_state.
4583  */
4584 bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
4585 			     struct intel_shared_dpll *pll,
4586 			     struct intel_dpll_hw_state *hw_state)
4587 {
4588 	return pll->info->funcs->get_hw_state(i915, pll, hw_state);
4589 }
4590 
/*
 * Read out one PLL's state from the hardware during driver init/resume and
 * reconstruct the software tracking (on/off, which pipes use it).
 */
static void readout_dpll_hw_state(struct drm_i915_private *i915,
				  struct intel_shared_dpll *pll)
{
	struct intel_crtc *crtc;

	pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state);

	/*
	 * If the BIOS left EHL DPLL4 enabled, take the DC-off reference that
	 * combo_pll_enable() would have taken, so that the later
	 * combo_pll_disable() has a matching reference to release.
	 */
	if (IS_JSL_EHL(i915) && pll->on &&
	    pll->info->id == DPLL_ID_EHL_DPLL4) {
		pll->wakeref = intel_display_power_get(i915,
						       POWER_DOMAIN_DPLL_DC_OFF);
	}

	/* Rebuild the mask of pipes whose active state uses this PLL. */
	pll->state.pipe_mask = 0;
	for_each_intel_crtc(&i915->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
			pll->state.pipe_mask |= BIT(crtc->pipe);
	}
	pll->active_mask = pll->state.pipe_mask;

	drm_dbg_kms(&i915->drm,
		    "%s hw state readout: pipe_mask 0x%x, on %i\n",
		    pll->info->name, pll->state.pipe_mask, pll->on);
}
4618 
4619 void intel_dpll_update_ref_clks(struct drm_i915_private *i915)
4620 {
4621 	if (i915->dpll.mgr && i915->dpll.mgr->update_ref_clks)
4622 		i915->dpll.mgr->update_ref_clks(i915);
4623 }
4624 
4625 void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
4626 {
4627 	int i;
4628 
4629 	for (i = 0; i < i915->dpll.num_shared_dpll; i++)
4630 		readout_dpll_hw_state(i915, &i915->dpll.shared_dplls[i]);
4631 }
4632 
4633 static void sanitize_dpll_state(struct drm_i915_private *i915,
4634 				struct intel_shared_dpll *pll)
4635 {
4636 	if (!pll->on || pll->active_mask)
4637 		return;
4638 
4639 	drm_dbg_kms(&i915->drm,
4640 		    "%s enabled but not in use, disabling\n",
4641 		    pll->info->name);
4642 
4643 	pll->info->funcs->disable(i915, pll);
4644 	pll->on = false;
4645 }
4646 
4647 void intel_dpll_sanitize_state(struct drm_i915_private *i915)
4648 {
4649 	int i;
4650 
4651 	for (i = 0; i < i915->dpll.num_shared_dpll; i++)
4652 		sanitize_dpll_state(i915, &i915->dpll.shared_dplls[i]);
4653 }
4654 
4655 /**
4656  * intel_dpll_dump_hw_state - write hw_state to dmesg
4657  * @dev_priv: i915 drm device
4658  * @hw_state: hw state to be written to the log
4659  *
4660  * Write the relevant values in @hw_state to dmesg using drm_dbg_kms.
4661  */
4662 void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
4663 			      const struct intel_dpll_hw_state *hw_state)
4664 {
4665 	if (dev_priv->dpll.mgr) {
4666 		dev_priv->dpll.mgr->dump_hw_state(dev_priv, hw_state);
4667 	} else {
4668 		/* fallback for platforms that don't use the shared dpll
4669 		 * infrastructure
4670 		 */
4671 		drm_dbg_kms(&dev_priv->drm,
4672 			    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
4673 			    "fp0: 0x%x, fp1: 0x%x\n",
4674 			    hw_state->dpll,
4675 			    hw_state->dpll_md,
4676 			    hw_state->fp0,
4677 			    hw_state->fp1);
4678 	}
4679 }
4680