xref: /linux/drivers/gpu/drm/i915/display/intel_dpll_mgr.c (revision da1d9caf95def6f0320819cf941c9fd1069ba9e1)
1 /*
2  * Copyright © 2006-2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <linux/string_helpers.h>
25 
26 #include "intel_de.h"
27 #include "intel_display_types.h"
28 #include "intel_dpio_phy.h"
29 #include "intel_dpll.h"
30 #include "intel_dpll_mgr.h"
31 #include "intel_pch_refclk.h"
32 #include "intel_tc.h"
33 #include "intel_tc_phy_regs.h"
34 
35 /**
36  * DOC: Display PLLs
37  *
38  * Display PLLs used for driving outputs vary by platform. While some have
39  * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
40  * from a pool. In the latter scenario, it is possible that multiple pipes
41  * share a PLL if their configurations match.
42  *
43  * This file provides an abstraction over display PLLs. The function
44  * intel_shared_dpll_init() initializes the PLLs for the given platform.  The
45  * users of a PLL are tracked and that tracking is integrated with the atomic
46  * modset interface. During an atomic operation, required PLLs can be reserved
47  * for a given CRTC and encoder configuration by calling
48  * intel_reserve_shared_dplls() and previously reserved PLLs can be released
49  * with intel_release_shared_dplls().
50  * Changes to the users are first staged in the atomic state, and then made
51  * effective by calling intel_shared_dpll_swap_state() during the atomic
52  * commit phase.
53  */
54 
55 /* platform specific hooks for managing DPLLs */
struct intel_shared_dpll_funcs {
	/*
	 * Hook for enabling the pll, called from intel_enable_shared_dpll() if
	 * the pll is not already enabled.
	 */
	void (*enable)(struct drm_i915_private *i915,
		       struct intel_shared_dpll *pll);

	/*
	 * Hook for disabling the pll, called from intel_disable_shared_dpll()
	 * only when it is safe to disable the pll, i.e., there are no more
	 * tracked users for it.
	 */
	void (*disable)(struct drm_i915_private *i915,
			struct intel_shared_dpll *pll);

	/*
	 * Hook for reading the values currently programmed to the DPLL
	 * registers. This is used for initial hw state readout and state
	 * verification after a mode set. Returns whether the PLL is
	 * currently enabled.
	 */
	bool (*get_hw_state)(struct drm_i915_private *i915,
			     struct intel_shared_dpll *pll,
			     struct intel_dpll_hw_state *hw_state);

	/*
	 * Hook for calculating the pll's output frequency based on its passed
	 * in state. Result is in kHz (see e.g. hsw_ddi_wrpll_get_freq()).
	 */
	int (*get_freq)(struct drm_i915_private *i915,
			const struct intel_shared_dpll *pll,
			const struct intel_dpll_hw_state *pll_state);
};
89 
/*
 * Per-platform DPLL management vtable, selected at init time. Only
 * @dpll_info, @get_dplls and @put_dplls are mandatory for the platforms
 * visible in this file; the remaining hooks may be NULL.
 */
struct intel_dpll_mgr {
	/* NULL-terminated table describing this platform's PLLs */
	const struct dpll_info *dpll_info;

	/* reserve the PLL(s) needed by @crtc/@encoder in the atomic state */
	int (*get_dplls)(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder);
	/* release the PLL(s) previously reserved for @crtc */
	void (*put_dplls)(struct intel_atomic_state *state,
			  struct intel_crtc *crtc);
	void (*update_active_dpll)(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_encoder *encoder);
	void (*update_ref_clks)(struct drm_i915_private *i915);
	/* debug dump of a PLL hw state snapshot */
	void (*dump_hw_state)(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state);
};
105 
106 static void
107 intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
108 				  struct intel_shared_dpll_state *shared_dpll)
109 {
110 	enum intel_dpll_id i;
111 
112 	/* Copy shared dpll state */
113 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
114 		struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
115 
116 		shared_dpll[i] = pll->state;
117 	}
118 }
119 
/*
 * Return the atomic state's copy of the shared DPLL state, lazily
 * duplicating the current per-PLL state into it on first use.
 */
static struct intel_shared_dpll_state *
intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
{
	struct intel_atomic_state *state = to_intel_atomic_state(s);

	/* Touching DPLL state requires holding connection_mutex. */
	drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));

	if (!state->dpll_set) {
		state->dpll_set = true;

		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
						  state->shared_dpll);
	}

	return state->shared_dpll;
}
136 
137 /**
138  * intel_get_shared_dpll_by_id - get a DPLL given its id
139  * @dev_priv: i915 device instance
140  * @id: pll id
141  *
142  * Returns:
143  * A pointer to the DPLL with @id
144  */
145 struct intel_shared_dpll *
146 intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
147 			    enum intel_dpll_id id)
148 {
149 	return &dev_priv->dpll.shared_dplls[id];
150 }
151 
152 /**
153  * intel_get_shared_dpll_id - get the id of a DPLL
154  * @dev_priv: i915 device instance
155  * @pll: the DPLL
156  *
157  * Returns:
158  * The id of @pll
159  */
160 enum intel_dpll_id
161 intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
162 			 struct intel_shared_dpll *pll)
163 {
164 	long pll_idx = pll - dev_priv->dpll.shared_dplls;
165 
166 	if (drm_WARN_ON(&dev_priv->drm,
167 			pll_idx < 0 ||
168 			pll_idx >= dev_priv->dpll.num_shared_dpll))
169 		return -1;
170 
171 	return pll_idx;
172 }
173 
/* For ILK+ */
/*
 * Warn if the PLL's current hardware enable state does not match the
 * expected @state (true = enabled, false = disabled).
 */
void assert_shared_dpll(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll,
			bool state)
{
	bool cur_state;
	struct intel_dpll_hw_state hw_state;

	if (drm_WARN(&dev_priv->drm, !pll,
		     "asserting DPLL %s with no DPLL\n", str_on_off(state)))
		return;

	cur_state = intel_dpll_get_hw_state(dev_priv, pll, &hw_state);
	I915_STATE_WARN(cur_state != state,
	     "%s assertion failure (expected %s, current %s)\n",
			pll->info->name, str_on_off(state),
			str_on_off(cur_state));
}
192 
/* MG PLL ids map linearly onto Type-C ports, starting at TC_PORT_1. */
static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
{
	return TC_PORT_1 + id - DPLL_ID_ICL_MGPLL1;
}
197 
/* Inverse of icl_pll_id_to_tc_port(): Type-C port -> MG PLL id. */
enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
{
	return tc_port - TC_PORT_1 + DPLL_ID_ICL_MGPLL1;
}
202 
203 static i915_reg_t
204 intel_combo_pll_enable_reg(struct drm_i915_private *i915,
205 			   struct intel_shared_dpll *pll)
206 {
207 	if (IS_DG1(i915))
208 		return DG1_DPLL_ENABLE(pll->info->id);
209 	else if (IS_JSL_EHL(i915) && (pll->info->id == DPLL_ID_EHL_DPLL4))
210 		return MG_PLL_ENABLE(0);
211 
212 	return ICL_DPLL_ENABLE(pll->info->id);
213 }
214 
215 static i915_reg_t
216 intel_tc_pll_enable_reg(struct drm_i915_private *i915,
217 			struct intel_shared_dpll *pll)
218 {
219 	const enum intel_dpll_id id = pll->info->id;
220 	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
221 
222 	if (IS_ALDERLAKE_P(i915))
223 		return ADLP_PORTTC_PLL_ENABLE(tc_port);
224 
225 	return MG_PLL_ENABLE(tc_port);
226 }
227 
228 /**
229  * intel_enable_shared_dpll - enable a CRTC's shared DPLL
230  * @crtc_state: CRTC, and its state, which has a shared DPLL
231  *
232  * Enable the shared DPLL used by @crtc.
233  */
234 void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
235 {
236 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
237 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
238 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
239 	unsigned int pipe_mask = BIT(crtc->pipe);
240 	unsigned int old_mask;
241 
242 	if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
243 		return;
244 
245 	mutex_lock(&dev_priv->dpll.lock);
246 	old_mask = pll->active_mask;
247 
248 	if (drm_WARN_ON(&dev_priv->drm, !(pll->state.pipe_mask & pipe_mask)) ||
249 	    drm_WARN_ON(&dev_priv->drm, pll->active_mask & pipe_mask))
250 		goto out;
251 
252 	pll->active_mask |= pipe_mask;
253 
254 	drm_dbg_kms(&dev_priv->drm,
255 		    "enable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
256 		    pll->info->name, pll->active_mask, pll->on,
257 		    crtc->base.base.id, crtc->base.name);
258 
259 	if (old_mask) {
260 		drm_WARN_ON(&dev_priv->drm, !pll->on);
261 		assert_shared_dpll_enabled(dev_priv, pll);
262 		goto out;
263 	}
264 	drm_WARN_ON(&dev_priv->drm, pll->on);
265 
266 	drm_dbg_kms(&dev_priv->drm, "enabling %s\n", pll->info->name);
267 	pll->info->funcs->enable(dev_priv, pll);
268 	pll->on = true;
269 
270 out:
271 	mutex_unlock(&dev_priv->dpll.lock);
272 }
273 
274 /**
275  * intel_disable_shared_dpll - disable a CRTC's shared DPLL
276  * @crtc_state: CRTC, and its state, which has a shared DPLL
277  *
278  * Disable the shared DPLL used by @crtc.
279  */
280 void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
281 {
282 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
283 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
284 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
285 	unsigned int pipe_mask = BIT(crtc->pipe);
286 
287 	/* PCH only available on ILK+ */
288 	if (DISPLAY_VER(dev_priv) < 5)
289 		return;
290 
291 	if (pll == NULL)
292 		return;
293 
294 	mutex_lock(&dev_priv->dpll.lock);
295 	if (drm_WARN(&dev_priv->drm, !(pll->active_mask & pipe_mask),
296 		     "%s not used by [CRTC:%d:%s]\n", pll->info->name,
297 		     crtc->base.base.id, crtc->base.name))
298 		goto out;
299 
300 	drm_dbg_kms(&dev_priv->drm,
301 		    "disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
302 		    pll->info->name, pll->active_mask, pll->on,
303 		    crtc->base.base.id, crtc->base.name);
304 
305 	assert_shared_dpll_enabled(dev_priv, pll);
306 	drm_WARN_ON(&dev_priv->drm, !pll->on);
307 
308 	pll->active_mask &= ~pipe_mask;
309 	if (pll->active_mask)
310 		goto out;
311 
312 	drm_dbg_kms(&dev_priv->drm, "disabling %s\n", pll->info->name);
313 	pll->info->funcs->disable(dev_priv, pll);
314 	pll->on = false;
315 
316 out:
317 	mutex_unlock(&dev_priv->dpll.lock);
318 }
319 
/*
 * Find a PLL from @dpll_mask for @crtc: prefer an in-use PLL whose staged
 * hw state exactly matches @pll_state (sharing), otherwise fall back to
 * the first unused PLL. Returns NULL if neither is available.
 */
static struct intel_shared_dpll *
intel_find_shared_dpll(struct intel_atomic_state *state,
		       const struct intel_crtc *crtc,
		       const struct intel_dpll_hw_state *pll_state,
		       unsigned long dpll_mask)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll, *unused_pll = NULL;
	struct intel_shared_dpll_state *shared_dpll;
	enum intel_dpll_id i;

	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);

	drm_WARN_ON(&dev_priv->drm, dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));

	for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) {
		pll = &dev_priv->dpll.shared_dplls[i];

		/* Only want to check enabled timings first */
		if (shared_dpll[i].pipe_mask == 0) {
			if (!unused_pll)
				unused_pll = pll;
			continue;
		}

		/* Share only on an exact hw-state match. */
		if (memcmp(pll_state,
			   &shared_dpll[i].hw_state,
			   sizeof(*pll_state)) == 0) {
			drm_dbg_kms(&dev_priv->drm,
				    "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
				    crtc->base.base.id, crtc->base.name,
				    pll->info->name,
				    shared_dpll[i].pipe_mask,
				    pll->active_mask);
			return pll;
		}
	}

	/* Ok no matching timings, maybe there's a free one? */
	if (unused_pll) {
		drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] allocated %s\n",
			    crtc->base.base.id, crtc->base.name,
			    unused_pll->info->name);
		return unused_pll;
	}

	return NULL;
}
368 
/*
 * Stage a reference on @pll for @crtc in the atomic state. The first
 * referencing pipe also records the hw state to program; later sharers
 * must already match it (see intel_find_shared_dpll()).
 */
static void
intel_reference_shared_dpll(struct intel_atomic_state *state,
			    const struct intel_crtc *crtc,
			    const struct intel_shared_dpll *pll,
			    const struct intel_dpll_hw_state *pll_state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_shared_dpll_state *shared_dpll;
	const enum intel_dpll_id id = pll->info->id;

	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);

	/* First user of this PLL: record the state to be programmed. */
	if (shared_dpll[id].pipe_mask == 0)
		shared_dpll[id].hw_state = *pll_state;

	drm_dbg(&i915->drm, "using %s for pipe %c\n", pll->info->name,
		pipe_name(crtc->pipe));

	shared_dpll[id].pipe_mask |= BIT(crtc->pipe);
}
389 
390 static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
391 					  const struct intel_crtc *crtc,
392 					  const struct intel_shared_dpll *pll)
393 {
394 	struct intel_shared_dpll_state *shared_dpll;
395 
396 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
397 	shared_dpll[pll->info->id].pipe_mask &= ~BIT(crtc->pipe);
398 }
399 
/*
 * Generic put_dplls() hook: clear the new state's PLL and drop the
 * reference taken by the old state, if any.
 */
static void intel_put_dpll(struct intel_atomic_state *state,
			   struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	new_crtc_state->shared_dpll = NULL;

	if (!old_crtc_state->shared_dpll)
		return;

	intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
}
415 
416 /**
417  * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
418  * @state: atomic state
419  *
420  * This is the dpll version of drm_atomic_helper_swap_state() since the
421  * helper does not handle driver-specific global state.
422  *
423  * For consistency with atomic helpers this function does a complete swap,
424  * i.e. it also puts the current state into @state, even though there is no
425  * need for that at this moment.
426  */
427 void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
428 {
429 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
430 	struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
431 	enum intel_dpll_id i;
432 
433 	if (!state->dpll_set)
434 		return;
435 
436 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
437 		struct intel_shared_dpll *pll =
438 			&dev_priv->dpll.shared_dplls[i];
439 
440 		swap(pll->state, shared_dpll[i]);
441 	}
442 }
443 
/*
 * Read back the PCH DPLL/FP registers into @hw_state. Returns whether the
 * PLL is currently enabled; returns false without touching the hardware
 * if the display power domain is off.
 */
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, PCH_DPLL(id));
	hw_state->dpll = val;
	hw_state->fp0 = intel_de_read(dev_priv, PCH_FP0(id));
	hw_state->fp1 = intel_de_read(dev_priv, PCH_FP1(id));

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & DPLL_VCO_ENABLE;
}
466 
/*
 * Warn if no PCH reference clock source is enabled; only meaningful on
 * IBX/CPT PCHs.
 */
static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
{
	u32 val;
	bool enabled;

	I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));

	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
	/* Any of the three source selects counts as "refclk enabled". */
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
			    DREF_SUPERSPREAD_SOURCE_MASK));
	I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
}
479 
/*
 * Program and enable a PCH DPLL from the PLL's staged hw state. The
 * register write order and delays follow the required hardware sequence.
 */
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	/* PCH refclock must be enabled first */
	ibx_assert_pch_refclk_enabled(dev_priv);

	intel_de_write(dev_priv, PCH_FP0(id), pll->state.hw_state.fp0);
	intel_de_write(dev_priv, PCH_FP1(id), pll->state.hw_state.fp1);

	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(200);
}
506 
/* Disable a PCH DPLL by clearing its control register. */
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(dev_priv, PCH_DPLL(id), 0);
	/* Posting read + delay to let the PLL actually shut down. */
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(200);
}
516 
/*
 * get_dplls() hook for PCH platforms: on IBX the PLL is fixed per pipe,
 * on CPT any of the two PCH PLLs can be used/shared. Returns 0 on
 * success, -EINVAL if no PLL could be reserved.
 */
static int ibx_get_dpll(struct intel_atomic_state *state,
			struct intel_crtc *crtc,
			struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum intel_dpll_id i;

	if (HAS_PCH_IBX(dev_priv)) {
		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
		i = (enum intel_dpll_id) crtc->pipe;
		pll = &dev_priv->dpll.shared_dplls[i];

		drm_dbg_kms(&dev_priv->drm,
			    "[CRTC:%d:%s] using pre-allocated %s\n",
			    crtc->base.base.id, crtc->base.name,
			    pll->info->name);
	} else {
		pll = intel_find_shared_dpll(state, crtc,
					     &crtc_state->dpll_hw_state,
					     BIT(DPLL_ID_PCH_PLL_B) |
					     BIT(DPLL_ID_PCH_PLL_A));
	}

	if (!pll)
		return -EINVAL;

	/* reference the pll */
	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return 0;
}
554 
/* Debug dump of a PCH DPLL hw state snapshot. */
static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm,
		    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
		    "fp0: 0x%x, fp1: 0x%x\n",
		    hw_state->dpll,
		    hw_state->dpll_md,
		    hw_state->fp0,
		    hw_state->fp1);
}
566 
/* Hardware hooks shared by both PCH DPLLs. */
static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
	.enable = ibx_pch_dpll_enable,
	.disable = ibx_pch_dpll_disable,
	.get_hw_state = ibx_pch_dpll_get_hw_state,
};

/* PLL table for PCH platforms; sentinel-terminated. */
static const struct dpll_info pch_plls[] = {
	{ "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
	{ "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
	{ },
};

/* DPLL manager for PCH (ILK/SNB/IVB) platforms. */
static const struct intel_dpll_mgr pch_pll_mgr = {
	.dpll_info = pch_plls,
	.get_dplls = ibx_get_dpll,
	.put_dplls = intel_put_dpll,
	.dump_hw_state = ibx_dump_hw_state,
};
585 
/* Enable a HSW/BDW WRPLL from its staged hw state. */
static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(dev_priv, WRPLL_CTL(id), pll->state.hw_state.wrpll);
	/* Posting read + short delay to let the PLL settle. */
	intel_de_posting_read(dev_priv, WRPLL_CTL(id));
	udelay(20);
}
595 
/* Enable the HSW/BDW SPLL from its staged hw state. */
static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	intel_de_write(dev_priv, SPLL_CTL, pll->state.hw_state.spll);
	/* Posting read + short delay to let the PLL settle. */
	intel_de_posting_read(dev_priv, SPLL_CTL);
	udelay(20);
}
603 
/* Disable a HSW/BDW WRPLL by clearing its enable bit. */
static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	val = intel_de_read(dev_priv, WRPLL_CTL(id));
	intel_de_write(dev_priv, WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
	intel_de_posting_read(dev_priv, WRPLL_CTL(id));

	/*
	 * Try to set up the PCH reference clock once all DPLLs
	 * that depend on it have been shut down.
	 */
	if (dev_priv->pch_ssc_use & BIT(id))
		intel_init_pch_refclk(dev_priv);
}
621 
/* Disable the HSW/BDW SPLL by clearing its enable bit. */
static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	enum intel_dpll_id id = pll->info->id;
	u32 val;

	val = intel_de_read(dev_priv, SPLL_CTL);
	intel_de_write(dev_priv, SPLL_CTL, val & ~SPLL_PLL_ENABLE);
	intel_de_posting_read(dev_priv, SPLL_CTL);

	/*
	 * Try to set up the PCH reference clock once all DPLLs
	 * that depend on it have been shut down.
	 */
	if (dev_priv->pch_ssc_use & BIT(id))
		intel_init_pch_refclk(dev_priv);
}
639 
/*
 * Read back a WRPLL's control register into @hw_state. Returns whether
 * the PLL is enabled; false (without a register read) if the display
 * power domain is off.
 */
static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, WRPLL_CTL(id));
	hw_state->wrpll = val;

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & WRPLL_PLL_ENABLE;
}
660 
/*
 * Read back the SPLL control register into @hw_state. Returns whether
 * the PLL is enabled; false (without a register read) if the display
 * power domain is off.
 */
static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
	intel_wakeref_t wakeref;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, SPLL_CTL);
	hw_state->spll = val;

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & SPLL_PLL_ENABLE;
}
680 
/* LC PLL reference frequency, in MHz */
#define LC_FREQ 2700
/* LC_FREQ expressed in units of 100 Hz, comparable with freq2k below */
#define LC_FREQ_2K U64_C(LC_FREQ * 2000)

/* Search range and step for the post divider p */
#define P_MIN 2
#define P_MAX 64
#define P_INC 2

/* Constraints for PLL good behavior */
#define REF_MIN 48
#define REF_MAX 400
#define VCO_MIN 2400
#define VCO_MAX 4800

/* Candidate WRPLL dividers: post (p), feedback (n2 = 2*N), reference (r2 = 2*R) */
struct hsw_wrpll_rnp {
	unsigned p, n2, r2;
};
697 
/*
 * Return the PPM error budget allowed when picking WRPLL dividers for
 * @clock (in Hz). Well-known pixel clocks get a tuned budget; anything
 * else gets the default of 1000.
 */
static unsigned hsw_wrpll_get_budget_for_freq(int clock)
{
	static const struct {
		int clock;
		unsigned budget;
	} budgets[] = {
		{  25175000, 0 },    {  25200000, 0 },    {  27000000, 0 },
		{  27027000, 0 },    {  37762500, 0 },    {  37800000, 0 },
		{  40500000, 0 },    {  40541000, 0 },    {  54000000, 0 },
		{  54054000, 0 },    {  59341000, 0 },    {  59400000, 0 },
		{  72000000, 0 },    {  74176000, 0 },    {  74250000, 0 },
		{  81000000, 0 },    {  81081000, 0 },    {  89012000, 0 },
		{  89100000, 0 },    { 108000000, 0 },    { 108108000, 0 },
		{ 111264000, 0 },    { 111375000, 0 },    { 148352000, 0 },
		{ 148500000, 0 },    { 162000000, 0 },    { 162162000, 0 },
		{ 222525000, 0 },    { 222750000, 0 },    { 296703000, 0 },
		{ 297000000, 0 },
		{ 233500000, 1500 }, { 245250000, 1500 }, { 247750000, 1500 },
		{ 253250000, 1500 }, { 298000000, 1500 },
		{ 169128000, 2000 }, { 169500000, 2000 }, { 179500000, 2000 },
		{ 202000000, 2000 },
		{ 256250000, 4000 }, { 262500000, 4000 }, { 270000000, 4000 },
		{ 272500000, 4000 }, { 273750000, 4000 }, { 280750000, 4000 },
		{ 281250000, 4000 }, { 286000000, 4000 }, { 291750000, 4000 },
		{ 267250000, 5000 }, { 268500000, 5000 },
	};
	unsigned int i;

	for (i = 0; i < sizeof(budgets) / sizeof(budgets[0]); i++) {
		if (budgets[i].clock == clock)
			return budgets[i].budget;
	}

	/* Default budget for clocks not in the table. */
	return 1000;
}
771 
/*
 * Compare the candidate dividers (r2, n2, p) against the best found so
 * far and update *best if the candidate is preferable. All comparisons
 * are done with cross-multiplied u64 arithmetic to avoid divisions.
 */
static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
				 unsigned int r2, unsigned int n2,
				 unsigned int p,
				 struct hsw_wrpll_rnp *best)
{
	u64 a, b, c, d, diff, diff_best;

	/* No best (r,n,p) yet */
	if (best->p == 0) {
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
		return;
	}

	/*
	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
	 * freq2k.
	 *
	 * delta = 1e6 *
	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
	 *	   freq2k;
	 *
	 * and we would like delta <= budget.
	 *
	 * If the discrepancy is above the PPM-based budget, always prefer to
	 * improve upon the previous solution.  However, if you're within the
	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
	 */
	/* a/b: budget thresholds; c/d: scaled deviations (same p*r2 scale). */
	a = freq2k * budget * p * r2;
	b = freq2k * budget * best->p * best->r2;
	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
	diff_best = abs_diff(freq2k * best->p * best->r2,
			     LC_FREQ_2K * best->n2);
	c = 1000000 * diff;
	d = 1000000 * diff_best;

	if (a < c && b < d) {
		/* If both are above the budget, pick the closer */
		if (best->p * best->r2 * diff < p * r2 * diff_best) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	} else if (a >= c && b < d) {
		/* If A is below the threshold but B is above it?  Update. */
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
	} else if (a >= c && b >= d) {
		/* Both are below the limit, so pick the higher n2/(r2*r2) */
		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	}
	/* Otherwise a < c && b >= d, do nothing */
}
831 
/*
 * Search the (r2, n2, p) divider space for the combination that best
 * produces @clock, writing the doubled dividers to the out parameters
 * (r2 = 2*R, n2 = 2*N; p is the post divider as-is).
 */
static void
hsw_ddi_calculate_wrpll(int clock /* in Hz */,
			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
{
	u64 freq2k;
	unsigned p, n2, r2;
	struct hsw_wrpll_rnp best = {};
	unsigned budget;

	/* freq2k is the target clock in units of 100 Hz. */
	freq2k = clock / 100;

	budget = hsw_wrpll_get_budget_for_freq(clock);

	/* Special case handling for 540 pixel clock: bypass WR PLL entirely
	 * and directly pass the LC PLL to it. */
	if (freq2k == 5400000) {
		*n2_out = 2;
		*p_out = 1;
		*r2_out = 2;
		return;
	}

	/*
	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
	 * the WR PLL.
	 *
	 * We want R so that REF_MIN <= Ref <= REF_MAX.
	 * Injecting R2 = 2 * R gives:
	 *   REF_MAX * r2 > LC_FREQ * 2 and
	 *   REF_MIN * r2 < LC_FREQ * 2
	 *
	 * Which means the desired boundaries for r2 are:
	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
	 *
	 */
	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
	     r2 <= LC_FREQ * 2 / REF_MIN;
	     r2++) {

		/*
		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
		 *
		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
		 *   VCO_MAX * r2 > n2 * LC_FREQ and
		 *   VCO_MIN * r2 < n2 * LC_FREQ)
		 *
		 * Which means the desired boundaries for n2 are:
		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
		 */
		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
		     n2 <= VCO_MAX * r2 / LC_FREQ;
		     n2++) {

			for (p = P_MIN; p <= P_MAX; p += P_INC)
				hsw_wrpll_update_rnp(freq2k, budget,
						     r2, n2, p, &best);
		}
	}

	*n2_out = best.n2;
	*p_out = best.p;
	*r2_out = best.r2;
}
896 
897 static struct intel_shared_dpll *
898 hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
899 		       struct intel_crtc *crtc)
900 {
901 	struct intel_crtc_state *crtc_state =
902 		intel_atomic_get_new_crtc_state(state, crtc);
903 	struct intel_shared_dpll *pll;
904 	u32 val;
905 	unsigned int p, n2, r2;
906 
907 	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
908 
909 	val = WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
910 	      WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
911 	      WRPLL_DIVIDER_POST(p);
912 
913 	crtc_state->dpll_hw_state.wrpll = val;
914 
915 	pll = intel_find_shared_dpll(state, crtc,
916 				     &crtc_state->dpll_hw_state,
917 				     BIT(DPLL_ID_WRPLL2) |
918 				     BIT(DPLL_ID_WRPLL1));
919 
920 	if (!pll)
921 		return NULL;
922 
923 	return pll;
924 }
925 
/*
 * Decode a WRPLL control word back into the resulting port clock in kHz.
 * Returns 0 (with MISSING_CASE) for an unknown reference select.
 */
static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
{
	int refclk;
	int n, p, r;
	u32 wrpll = pll_state->wrpll;

	switch (wrpll & WRPLL_REF_MASK) {
	case WRPLL_REF_SPECIAL_HSW:
		/* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
		if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) {
			refclk = dev_priv->dpll.ref_clks.nssc;
			break;
		}
		fallthrough;
	case WRPLL_REF_PCH_SSC:
		/*
		 * We could calculate spread here, but our checking
		 * code only cares about 5% accuracy, and spread is a max of
		 * 0.5% downspread.
		 */
		refclk = dev_priv->dpll.ref_clks.ssc;
		break;
	case WRPLL_REF_LCPLL:
		refclk = 2700000;
		break;
	default:
		MISSING_CASE(wrpll);
		return 0;
	}

	/* Extract the doubled reference/post/feedback dividers. */
	r = wrpll & WRPLL_DIVIDER_REF_MASK;
	p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
	n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;

	/* Convert to KHz, p & r have a fixed point portion */
	return (refclk * n / 10) / (p * r) * 2;
}
965 
966 static struct intel_shared_dpll *
967 hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
968 {
969 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
970 	struct intel_shared_dpll *pll;
971 	enum intel_dpll_id pll_id;
972 	int clock = crtc_state->port_clock;
973 
974 	switch (clock / 2) {
975 	case 81000:
976 		pll_id = DPLL_ID_LCPLL_810;
977 		break;
978 	case 135000:
979 		pll_id = DPLL_ID_LCPLL_1350;
980 		break;
981 	case 270000:
982 		pll_id = DPLL_ID_LCPLL_2700;
983 		break;
984 	default:
985 		drm_dbg_kms(&dev_priv->drm, "Invalid clock for DP: %d\n",
986 			    clock);
987 		return NULL;
988 	}
989 
990 	pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
991 
992 	if (!pll)
993 		return NULL;
994 
995 	return pll;
996 }
997 
998 static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
999 				  const struct intel_shared_dpll *pll,
1000 				  const struct intel_dpll_hw_state *pll_state)
1001 {
1002 	int link_clock = 0;
1003 
1004 	switch (pll->info->id) {
1005 	case DPLL_ID_LCPLL_810:
1006 		link_clock = 81000;
1007 		break;
1008 	case DPLL_ID_LCPLL_1350:
1009 		link_clock = 135000;
1010 		break;
1011 	case DPLL_ID_LCPLL_2700:
1012 		link_clock = 270000;
1013 		break;
1014 	default:
1015 		drm_WARN(&i915->drm, 1, "bad port clock sel\n");
1016 		break;
1017 	}
1018 
1019 	return link_clock * 2;
1020 }
1021 
/*
 * Reserve the SPLL for @crtc. Only a 270 MHz port clock (135 MHz link
 * rate) is supported here; anything else warns and fails.
 */
static struct intel_shared_dpll *
hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
		      struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
		return NULL;

	crtc_state->dpll_hw_state.spll = SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz |
					 SPLL_REF_MUXED_SSC;

	return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
				      BIT(DPLL_ID_SPLL));
}
1038 
1039 static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
1040 				 const struct intel_shared_dpll *pll,
1041 				 const struct intel_dpll_hw_state *pll_state)
1042 {
1043 	int link_clock = 0;
1044 
1045 	switch (pll_state->spll & SPLL_FREQ_MASK) {
1046 	case SPLL_FREQ_810MHz:
1047 		link_clock = 81000;
1048 		break;
1049 	case SPLL_FREQ_1350MHz:
1050 		link_clock = 135000;
1051 		break;
1052 	case SPLL_FREQ_2700MHz:
1053 		link_clock = 270000;
1054 		break;
1055 	default:
1056 		drm_WARN(&i915->drm, 1, "bad spll freq\n");
1057 		break;
1058 	}
1059 
1060 	return link_clock * 2;
1061 }
1062 
/*
 * Pick and reserve a DPLL for the crtc based on the output type:
 * WRPLL for HDMI, a fixed LCPLL for DP, SPLL for analog.
 * Returns 0 on success, -EINVAL if no suitable PLL was found.
 */
static int hsw_get_dpll(struct intel_atomic_state *state,
			struct intel_crtc *crtc,
			struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_shared_dpll *pll = NULL;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		pll = hsw_ddi_wrpll_get_dpll(state, crtc);
	else if (intel_crtc_has_dp_encoder(crtc_state))
		pll = hsw_ddi_lcpll_get_dpll(crtc_state);
	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
		pll = hsw_ddi_spll_get_dpll(state, crtc);

	if (!pll)
		return -EINVAL;

	/* Record this crtc as a user of the chosen PLL in the atomic state. */
	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return 0;
}
1088 
/* Cache the SSC and non-SSC reference clock frequencies (kHz) for HSW. */
static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	i915->dpll.ref_clks.ssc = 135000;
	/* Non-SSC is only used on non-ULT HSW. */
	if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
		i915->dpll.ref_clks.nssc = 24000;
	else
		i915->dpll.ref_clks.nssc = 135000;
}
1098 
/* Log the HSW PLL hw state (raw WRPLL and SPLL control values) for debug. */
static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
		    hw_state->wrpll, hw_state->spll);
}
1105 
/* Shared-DPLL hooks for the two HSW WRPLLs. */
static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
	.enable = hsw_ddi_wrpll_enable,
	.disable = hsw_ddi_wrpll_disable,
	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
	.get_freq = hsw_ddi_wrpll_get_freq,
};
1112 
/* Shared-DPLL hooks for the HSW SPLL (analog output). */
static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
	.enable = hsw_ddi_spll_enable,
	.disable = hsw_ddi_spll_disable,
	.get_hw_state = hsw_ddi_spll_get_hw_state,
	.get_freq = hsw_ddi_spll_get_freq,
};
1119 
/* LCPLLs are always on (INTEL_DPLL_ALWAYS_ON); nothing to do to enable. */
static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
}
1124 
/* LCPLLs are always on; they are never actually disabled. */
static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}
1129 
/* Always-on LCPLL: report enabled without touching hardware. */
static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	return true;
}
1136 
/* Shared-DPLL hooks for the always-on HSW LCPLLs (enable/disable are no-ops). */
static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
	.enable = hsw_ddi_lcpll_enable,
	.disable = hsw_ddi_lcpll_disable,
	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
	.get_freq = hsw_ddi_lcpll_get_freq,
};
1143 
/* All PLLs available on HSW/BDW; LCPLLs are fixed-rate and always on. */
static const struct dpll_info hsw_plls[] = {
	{ "WRPLL 1",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1,     0 },
	{ "WRPLL 2",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2,     0 },
	{ "SPLL",       &hsw_ddi_spll_funcs,  DPLL_ID_SPLL,       0 },
	{ "LCPLL 810",  &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810,  INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
	{ },
};
1153 
/* Top-level DPLL manager for HSW/BDW. */
static const struct intel_dpll_mgr hsw_pll_mgr = {
	.dpll_info = hsw_plls,
	.get_dplls = hsw_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = hsw_update_dpll_ref_clks,
	.dump_hw_state = hsw_dump_hw_state,
};
1161 
/* Per-PLL register set on SKL: control + the two HDMI config registers. */
struct skl_dpll_regs {
	i915_reg_t ctl, cfgcr1, cfgcr2;
};
1165 
/* this array is indexed by the *shared* pll id */
static const struct skl_dpll_regs skl_dpll_regs[4] = {
	{
		/* DPLL 0 */
		.ctl = LCPLL1_CTL,
		/* DPLL 0 doesn't support HDMI mode */
	},
	{
		/* DPLL 1 */
		.ctl = LCPLL2_CTL,
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
	},
	{
		/* DPLL 2 */
		.ctl = WRPLL_CTL(0),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
	},
	{
		/* DPLL 3 */
		.ctl = WRPLL_CTL(1),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
	},
};
1192 
/*
 * Update this PLL's fields in the shared DPLL_CTRL1 register from the
 * cached hw state. Each PLL owns a 6-bit slice (HDMI mode, SSC, link rate)
 * at bit offset id * 6; other PLLs' slices are left untouched.
 */
static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
				    struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	val = intel_de_read(dev_priv, DPLL_CTRL1);

	val &= ~(DPLL_CTRL1_HDMI_MODE(id) |
		 DPLL_CTRL1_SSC(id) |
		 DPLL_CTRL1_LINK_RATE_MASK(id));
	val |= pll->state.hw_state.ctrl1 << (id * 6);

	intel_de_write(dev_priv, DPLL_CTRL1, val);
	/* posting read to make sure the write landed before proceeding */
	intel_de_posting_read(dev_priv, DPLL_CTRL1);
}
1209 
/*
 * Enable a SKL DPLL (1-3): program CTRL1 and the CFGCR registers from
 * the cached hw state, set the enable bit and wait for the lock status.
 */
static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	skl_ddi_pll_write_ctrl1(dev_priv, pll);

	intel_de_write(dev_priv, regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
	intel_de_write(dev_priv, regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
	intel_de_posting_read(dev_priv, regs[id].cfgcr1);
	intel_de_posting_read(dev_priv, regs[id].cfgcr2);

	/* the enable bit is always bit 31 */
	intel_de_write(dev_priv, regs[id].ctl,
		       intel_de_read(dev_priv, regs[id].ctl) | LCPLL_PLL_ENABLE);

	/* 5 ms timeout for the PLL to report lock */
	if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
		drm_err(&dev_priv->drm, "DPLL %d not locked\n", id);
}
1230 
/*
 * DPLL0 is always on (it drives CDCLK, see skl_ddi_dpll0_get_hw_state);
 * only its DPLL_CTRL1 fields need (re)programming here.
 */
static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	skl_ddi_pll_write_ctrl1(dev_priv, pll);
}
1236 
/* Disable a SKL DPLL (1-3) by clearing its control register enable bit. */
static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	/* the enable bit is always bit 31 */
	intel_de_write(dev_priv, regs[id].ctl,
		       intel_de_read(dev_priv, regs[id].ctl) & ~LCPLL_PLL_ENABLE);
	intel_de_posting_read(dev_priv, regs[id].ctl);
}
1248 
/* DPLL0 must stay enabled (it drives CDCLK); nothing to do here. */
static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}
1253 
/*
 * Read back the hw state of a SKL DPLL (1-3) into @hw_state.
 * Returns false if display power is off or the PLL is disabled.
 */
static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
{
	u32 val;
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret;

	/* need the display core powered up to read the registers */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, regs[id].ctl);
	if (!(val & LCPLL_PLL_ENABLE))
		goto out;

	/* extract this PLL's 6-bit slice of the shared CTRL1 register */
	val = intel_de_read(dev_priv, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	/* avoid reading back stale values if HDMI mode is not enabled */
	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
		hw_state->cfgcr1 = intel_de_read(dev_priv, regs[id].cfgcr1);
		hw_state->cfgcr2 = intel_de_read(dev_priv, regs[id].cfgcr2);
	}
	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1290 
/*
 * Read back DPLL0's hw state. Only CTRL1 is read: DPLL0 has no CFGCR
 * registers (no HDMI mode). Warns if the PLL is unexpectedly off.
 */
static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	/* DPLL0 is always enabled since it drives CDCLK */
	val = intel_de_read(dev_priv, regs[id].ctl);
	if (drm_WARN_ON(&dev_priv->drm, !(val & LCPLL_PLL_ENABLE)))
		goto out;

	val = intel_de_read(dev_priv, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1323 
/* Best WRPLL divider candidate found so far by skl_ddi_calculate_wrpll(). */
struct skl_wrpll_context {
	u64 min_deviation;		/* current minimal deviation */
	u64 central_freq;		/* chosen central freq */
	u64 dco_freq;			/* chosen dco freq */
	unsigned int p;			/* chosen divider */
};

/* DCO freq must be within +1%/-6%  of the DCO central freq */
#define SKL_DCO_MAX_PDEVIATION	100
#define SKL_DCO_MAX_NDEVIATION	600
1334 
1335 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1336 				  u64 central_freq,
1337 				  u64 dco_freq,
1338 				  unsigned int divider)
1339 {
1340 	u64 deviation;
1341 
1342 	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1343 			      central_freq);
1344 
1345 	/* positive deviation */
1346 	if (dco_freq >= central_freq) {
1347 		if (deviation < SKL_DCO_MAX_PDEVIATION &&
1348 		    deviation < ctx->min_deviation) {
1349 			ctx->min_deviation = deviation;
1350 			ctx->central_freq = central_freq;
1351 			ctx->dco_freq = dco_freq;
1352 			ctx->p = divider;
1353 		}
1354 	/* negative deviation */
1355 	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1356 		   deviation < ctx->min_deviation) {
1357 		ctx->min_deviation = deviation;
1358 		ctx->central_freq = central_freq;
1359 		ctx->dco_freq = dco_freq;
1360 		ctx->p = divider;
1361 	}
1362 }
1363 
/*
 * Split an overall post divider p into the hardware's three multiplier
 * stages so that p == p0 * p1 * p2 with encodable stage values.
 * The outputs are left untouched if p is not a supported divider.
 */
static void skl_wrpll_get_multipliers(unsigned int p,
				      unsigned int *p0 /* out */,
				      unsigned int *p1 /* out */,
				      unsigned int *p2 /* out */)
{
	if (p % 2 == 0) {
		/* even dividers: factor out a 2, then decompose the half */
		unsigned int half = p / 2;

		switch (half) {
		case 1:
		case 2:
		case 3:
		case 5:
			*p0 = 2;
			*p1 = 1;
			*p2 = half;
			return;
		default:
			break;
		}

		if (half % 2 == 0) {
			*p0 = 2;
			*p1 = half / 2;
			*p2 = 2;
		} else if (half % 3 == 0) {
			*p0 = 3;
			*p1 = half / 3;
			*p2 = 2;
		} else if (half % 7 == 0) {
			*p0 = 7;
			*p1 = half / 7;
			*p2 = 2;
		}
		return;
	}

	/* odd dividers: 3, 5, 7, 9, 15, 21, 35 */
	switch (p) {
	case 3:
	case 9:
		*p0 = 3;
		*p1 = 1;
		*p2 = p / 3;
		break;
	case 5:
	case 7:
		*p0 = p;
		*p1 = 1;
		*p2 = 1;
		break;
	case 15:
		*p0 = 3;
		*p1 = 1;
		*p2 = 5;
		break;
	case 21:
		*p0 = 7;
		*p1 = 1;
		*p2 = 3;
		break;
	case 35:
		*p0 = 7;
		*p1 = 1;
		*p2 = 5;
		break;
	}
}
1412 
/* Register-level WRPLL settings produced by skl_wrpll_params_populate(). */
struct skl_wrpll_params {
	u32 dco_fraction;	/* 15-bit fractional DCO multiplier */
	u32 dco_integer;	/* integer DCO multiplier */
	u32 qdiv_ratio;
	u32 qdiv_mode;		/* 0 = qdiv bypassed (ratio 1) */
	u32 kdiv;
	u32 pdiv;
	u32 central_freq;	/* encoded central frequency select */
};
1422 
1423 static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
1424 				      u64 afe_clock,
1425 				      int ref_clock,
1426 				      u64 central_freq,
1427 				      u32 p0, u32 p1, u32 p2)
1428 {
1429 	u64 dco_freq;
1430 
1431 	switch (central_freq) {
1432 	case 9600000000ULL:
1433 		params->central_freq = 0;
1434 		break;
1435 	case 9000000000ULL:
1436 		params->central_freq = 1;
1437 		break;
1438 	case 8400000000ULL:
1439 		params->central_freq = 3;
1440 	}
1441 
1442 	switch (p0) {
1443 	case 1:
1444 		params->pdiv = 0;
1445 		break;
1446 	case 2:
1447 		params->pdiv = 1;
1448 		break;
1449 	case 3:
1450 		params->pdiv = 2;
1451 		break;
1452 	case 7:
1453 		params->pdiv = 4;
1454 		break;
1455 	default:
1456 		WARN(1, "Incorrect PDiv\n");
1457 	}
1458 
1459 	switch (p2) {
1460 	case 5:
1461 		params->kdiv = 0;
1462 		break;
1463 	case 2:
1464 		params->kdiv = 1;
1465 		break;
1466 	case 3:
1467 		params->kdiv = 2;
1468 		break;
1469 	case 1:
1470 		params->kdiv = 3;
1471 		break;
1472 	default:
1473 		WARN(1, "Incorrect KDiv\n");
1474 	}
1475 
1476 	params->qdiv_ratio = p1;
1477 	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
1478 
1479 	dco_freq = p0 * p1 * p2 * afe_clock;
1480 
1481 	/*
1482 	 * Intermediate values are in Hz.
1483 	 * Divide by MHz to match bsepc
1484 	 */
1485 	params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
1486 	params->dco_fraction =
1487 		div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
1488 			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
1489 }
1490 
/*
 * Find WRPLL dividers for @clock: try every even, then every odd, post
 * divider against the three DCO central frequencies and keep the candidate
 * with the smallest deviation (even dividers preferred). On success the
 * result is written to @wrpll_params; returns -EINVAL if no divider keeps
 * the DCO within its allowed deviation window.
 */
static int
skl_ddi_calculate_wrpll(int clock /* in Hz */,
			int ref_clock,
			struct skl_wrpll_params *wrpll_params)
{
	static const u64 dco_central_freq[3] = { 8400000000ULL,
						 9000000000ULL,
						 9600000000ULL };
	static const u8 even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
					    24, 28, 30, 32, 36, 40, 42, 44,
					    48, 52, 54, 56, 60, 64, 66, 68,
					    70, 72, 76, 78, 80, 84, 88, 90,
					    92, 96, 98 };
	static const u8 odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
	static const struct {
		const u8 *list;
		int n_dividers;
	} dividers[] = {
		{ even_dividers, ARRAY_SIZE(even_dividers) },
		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
	};
	struct skl_wrpll_context ctx = {
		.min_deviation = U64_MAX,
	};
	unsigned int dco, d, i;
	unsigned int p0, p1, p2;
	u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */

	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
			for (i = 0; i < dividers[d].n_dividers; i++) {
				unsigned int p = dividers[d].list[i];
				u64 dco_freq = p * afe_clock;

				skl_wrpll_try_divider(&ctx,
						      dco_central_freq[dco],
						      dco_freq,
						      p);
				/*
				 * Skip the remaining dividers if we're sure to
				 * have found the definitive divider, we can't
				 * improve a 0 deviation.
				 */
				if (ctx.min_deviation == 0)
					goto skip_remaining_dividers;
			}
		}

skip_remaining_dividers:
		/*
		 * If a solution is found with an even divider, prefer
		 * this one.
		 */
		if (d == 0 && ctx.p)
			break;
	}

	if (!ctx.p) {
		DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
		return -EINVAL;
	}

	/*
	 * gcc incorrectly analyses that these can be used without being
	 * initialized. To be fair, it's hard to guess.
	 */
	p0 = p1 = p2 = 0;
	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
	skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
				  ctx.central_freq, p0, p1, p2);

	return 0;
}
1564 
/*
 * Compute the WRPLL configuration for an HDMI mode and store the resulting
 * CTRL1/CFGCR1/CFGCR2 values in the crtc state. Returns 0 on success or
 * the error from skl_ddi_calculate_wrpll().
 */
static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct skl_wrpll_params wrpll_params = {};
	u32 ctrl1, cfgcr1, cfgcr2;
	int ret;

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);

	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);

	/* port_clock is in kHz, the calculator wants Hz */
	ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
				      i915->dpll.ref_clks.nssc, &wrpll_params);
	if (ret)
		return ret;

	cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
		wrpll_params.dco_integer;

	cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
		wrpll_params.central_freq;

	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;

	return 0;
}
1601 
/*
 * Decode a WRPLL's CFGCR1/CFGCR2 state back into its output port clock
 * (kHz): reconstruct the DCO frequency from the integer/fractional
 * multipliers and divide by the decoded p0 * p1 * p2 post divider and
 * the 5x AFE factor. Returns 0 on an undecodable divider field.
 */
static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
{
	int ref_clock = i915->dpll.ref_clks.nssc;
	u32 p0, p1, p2, dco_freq;

	p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
	p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;

	/* qdiv only contributes when qdiv mode is enabled */
	if (pll_state->cfgcr2 &  DPLL_CFGCR2_QDIV_MODE(1))
		p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
	else
		p1 = 1;


	switch (p0) {
	case DPLL_CFGCR2_PDIV_1:
		p0 = 1;
		break;
	case DPLL_CFGCR2_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR2_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR2_PDIV_7_INVALID:
		/*
		 * Incorrect ASUS-Z170M BIOS setting, the HW seems to ignore bit#0,
		 * handling it the same way as PDIV_7.
		 */
		drm_dbg_kms(&i915->drm, "Invalid WRPLL PDIV divider value, fixing it.\n");
		fallthrough;
	case DPLL_CFGCR2_PDIV_7:
		p0 = 7;
		break;
	default:
		MISSING_CASE(p0);
		return 0;
	}

	switch (p2) {
	case DPLL_CFGCR2_KDIV_5:
		p2 = 5;
		break;
	case DPLL_CFGCR2_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR2_KDIV_3:
		p2 = 3;
		break;
	case DPLL_CFGCR2_KDIV_1:
		p2 = 1;
		break;
	default:
		MISSING_CASE(p2);
		return 0;
	}

	/* DCO = integer part * ref + fraction (15-bit) * ref / 2^15 */
	dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
		   ref_clock;

	dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
		    ref_clock / 0x8000;

	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	return dco_freq / (p0 * p1 * p2 * 5);
}
1672 
/*
 * Encode the DP link rate for this crtc into the ctrl1 value of the
 * dpll hw state. Unrecognized rates leave the link rate field at zero.
 */
static int
skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
	u32 ctrl1;

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
	switch (crtc_state->port_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
		break;
	case 135000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
		break;
	case 270000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
		break;
		/* eDP 1.4 rates */
	case 162000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
		break;
	case 108000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
		break;
	case 216000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
		break;
	}

	crtc_state->dpll_hw_state.ctrl1 = ctrl1;

	return 0;
}
1709 
1710 static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1711 				  const struct intel_shared_dpll *pll,
1712 				  const struct intel_dpll_hw_state *pll_state)
1713 {
1714 	int link_clock = 0;
1715 
1716 	switch ((pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
1717 		DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
1718 	case DPLL_CTRL1_LINK_RATE_810:
1719 		link_clock = 81000;
1720 		break;
1721 	case DPLL_CTRL1_LINK_RATE_1080:
1722 		link_clock = 108000;
1723 		break;
1724 	case DPLL_CTRL1_LINK_RATE_1350:
1725 		link_clock = 135000;
1726 		break;
1727 	case DPLL_CTRL1_LINK_RATE_1620:
1728 		link_clock = 162000;
1729 		break;
1730 	case DPLL_CTRL1_LINK_RATE_2160:
1731 		link_clock = 216000;
1732 		break;
1733 	case DPLL_CTRL1_LINK_RATE_2700:
1734 		link_clock = 270000;
1735 		break;
1736 	default:
1737 		drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
1738 		break;
1739 	}
1740 
1741 	return link_clock * 2;
1742 }
1743 
/*
 * Compute the DPLL state for the crtc (HDMI dividers or DP link rate)
 * and reserve a matching shared DPLL: DPLL0 for eDP, DPLL1-3 otherwise.
 * Returns 0 on success or a negative error code.
 */
static int skl_get_dpll(struct intel_atomic_state *state,
			struct intel_crtc *crtc,
			struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_shared_dpll *pll;
	int ret;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		ret = skl_ddi_hdmi_pll_dividers(crtc_state);
	else if (intel_crtc_has_dp_encoder(crtc_state))
		ret = skl_ddi_dp_set_dpll_hw_state(crtc_state);
	else
		ret = -EINVAL;
	if (ret)
		return ret;

	/* eDP must use the always-on DPLL0; other outputs share DPLL1-3 */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
		pll = intel_find_shared_dpll(state, crtc,
					     &crtc_state->dpll_hw_state,
					     BIT(DPLL_ID_SKL_DPLL0));
	else
		pll = intel_find_shared_dpll(state, crtc,
					     &crtc_state->dpll_hw_state,
					     BIT(DPLL_ID_SKL_DPLL3) |
					     BIT(DPLL_ID_SKL_DPLL2) |
					     BIT(DPLL_ID_SKL_DPLL1));
	if (!pll)
		return -EINVAL;

	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return 0;
}
1782 
1783 static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
1784 				const struct intel_shared_dpll *pll,
1785 				const struct intel_dpll_hw_state *pll_state)
1786 {
1787 	/*
1788 	 * ctrl1 register is already shifted for each pll, just use 0 to get
1789 	 * the internal shift for each field
1790 	 */
1791 	if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
1792 		return skl_ddi_wrpll_get_freq(i915, pll, pll_state);
1793 	else
1794 		return skl_ddi_lcpll_get_freq(i915, pll, pll_state);
1795 }
1796 
/* Cache the non-SSC reference clock; SKL PLLs share the CDCLK reference. */
static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC ref */
	i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
}
1802 
/* Log the SKL PLL hw state (ctrl1 + both config registers) for debug. */
static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
		      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
		      hw_state->ctrl1,
		      hw_state->cfgcr1,
		      hw_state->cfgcr2);
}
1812 
/* Shared-DPLL hooks for SKL DPLL1-3. */
static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
	.enable = skl_ddi_pll_enable,
	.disable = skl_ddi_pll_disable,
	.get_hw_state = skl_ddi_pll_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};
1819 
/* Shared-DPLL hooks for DPLL0, which drives CDCLK and is never turned off. */
static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
	.enable = skl_ddi_dpll0_enable,
	.disable = skl_ddi_dpll0_disable,
	.get_hw_state = skl_ddi_dpll0_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};
1826 
/* All PLLs available on SKL; DPLL0 is always on since it drives CDCLK. */
static const struct dpll_info skl_plls[] = {
	{ "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
	{ "DPLL 1", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL1, 0 },
	{ "DPLL 2", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL2, 0 },
	{ "DPLL 3", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL3, 0 },
	{ },
};
1834 
/* Top-level DPLL manager for SKL. */
static const struct intel_dpll_mgr skl_pll_mgr = {
	.dpll_info = skl_plls,
	.get_dplls = skl_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = skl_update_dpll_ref_clks,
	.dump_hw_state = skl_dump_hw_state,
};
1842 
/*
 * Enable a BXT/GLK port PLL: select the reference, program dividers, loop
 * coefficients and lane staggering from the cached hw state, then enable
 * the PLL and wait for lock. PLL id maps 1:1 onto the DDI port.
 */
static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	u32 temp;
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	enum dpio_phy phy;
	enum dpio_channel ch;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	/* Non-SSC reference */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp |= PORT_PLL_REF_SEL;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

	/* GLK needs the PLL power enabled before programming it */
	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
		temp |= PORT_PLL_POWER_ENABLE;
		intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

		if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
				 PORT_PLL_POWER_STATE), 200))
			drm_err(&dev_priv->drm,
				"Power state not set for PLL:%d\n", port);
	}

	/* Disable 10 bit clock */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Write P1 & P2 */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
	temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
	temp |= pll->state.hw_state.ebb0;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch), temp);

	/* Write M2 integer */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
	temp &= ~PORT_PLL_M2_INT_MASK;
	temp |= pll->state.hw_state.pll0;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 0), temp);

	/* Write N */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
	temp &= ~PORT_PLL_N_MASK;
	temp |= pll->state.hw_state.pll1;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 1), temp);

	/* Write M2 fraction */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
	temp &= ~PORT_PLL_M2_FRAC_MASK;
	temp |= pll->state.hw_state.pll2;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 2), temp);

	/* Write M2 fraction enable */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
	temp &= ~PORT_PLL_M2_FRAC_ENABLE;
	temp |= pll->state.hw_state.pll3;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 3), temp);

	/* Write coeff */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
	temp &= ~PORT_PLL_PROP_COEFF_MASK;
	temp &= ~PORT_PLL_INT_COEFF_MASK;
	temp &= ~PORT_PLL_GAIN_CTL_MASK;
	temp |= pll->state.hw_state.pll6;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 6), temp);

	/* Write calibration val */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
	temp &= ~PORT_PLL_TARGET_CNT_MASK;
	temp |= pll->state.hw_state.pll8;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 8), temp);

	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
	temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
	temp |= pll->state.hw_state.pll9;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 9), temp);

	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
	temp &= ~PORT_PLL_DCO_AMP_MASK;
	temp |= pll->state.hw_state.pll10;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 10), temp);

	/* Recalibrate with new settings */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	temp |= PORT_PLL_RECALIBRATE;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	temp |= pll->state.hw_state.ebb4;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Enable PLL */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp |= PORT_PLL_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));

	/* 200 us timeout for the PLL to report lock */
	if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
			200))
		drm_err(&dev_priv->drm, "PLL %d not locked\n", port);

	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_TX_DW5_LN0(phy, ch));
		temp |= DCC_DELAY_RANGE_2;
		intel_de_write(dev_priv, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
	}

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers and we pick lanes 0/1 for that.
	 */
	temp = intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN01(phy, ch));
	temp &= ~LANE_STAGGER_MASK;
	temp &= ~LANESTAGGER_STRAP_OVRD;
	temp |= pll->state.hw_state.pcsdw12;
	intel_de_write(dev_priv, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
}
1963 
/*
 * Disable a BXT/GLK port PLL; on GLK additionally drop the PLL power
 * and wait for the power state to clear.
 */
static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
					struct intel_shared_dpll *pll)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	u32 temp;

	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp &= ~PORT_PLL_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));

	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
		temp &= ~PORT_PLL_POWER_ENABLE;
		intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

		if (wait_for_us(!(intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
				  PORT_PLL_POWER_STATE), 200))
			drm_err(&dev_priv->drm,
				"Power state not reset for PLL:%d\n", port);
	}
}
1986 
/*
 * Read back the full divider/coefficient state of a BXT/GLK port PLL
 * into @hw_state, masking each register down to the bits we program.
 * Returns false if display power is off or the PLL is disabled.
 */
static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
					struct intel_shared_dpll *pll,
					struct intel_dpll_hw_state *hw_state)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	intel_wakeref_t wakeref;
	enum dpio_phy phy;
	enum dpio_channel ch;
	u32 val;
	bool ret;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	if (!(val & PORT_PLL_ENABLE))
		goto out;

	hw_state->ebb0 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;

	hw_state->ebb4 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;

	hw_state->pll0 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
	hw_state->pll0 &= PORT_PLL_M2_INT_MASK;

	hw_state->pll1 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
	hw_state->pll1 &= PORT_PLL_N_MASK;

	hw_state->pll2 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;

	hw_state->pll3 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;

	hw_state->pll6 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
			  PORT_PLL_INT_COEFF_MASK |
			  PORT_PLL_GAIN_CTL_MASK;

	hw_state->pll8 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;

	hw_state->pll9 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;

	hw_state->pll10 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
			   PORT_PLL_DCO_AMP_MASK;

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers. We configure all lanes the same way, so
	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
	 */
	hw_state->pcsdw12 = intel_de_read(dev_priv,
					  BXT_PORT_PCS_DW12_LN01(phy, ch));
	if (intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
		drm_dbg(&dev_priv->drm,
			"lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
			hw_state->pcsdw12,
			intel_de_read(dev_priv,
				      BXT_PORT_PCS_DW12_LN23(phy, ch)));
	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
2066 
/*
 * Pre-calculated PLL divider values for the standard DP link rates.
 * Looked up by .dot (port clock in kHz) in bxt_ddi_dp_pll_dividers().
 */
static const struct dpll bxt_dp_clk_val[] = {
	/* m2 is .22 binary fixed point */
	{ .dot = 162000, .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
	{ .dot = 270000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
	{ .dot = 540000, .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
	{ .dot = 216000, .p1 = 3, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
	{ .dot = 243000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6133333 /* 24.3 */ },
	{ .dot = 324000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
	{ .dot = 432000, .p1 = 3, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
};
2078 
2079 static int
2080 bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
2081 			  struct dpll *clk_div)
2082 {
2083 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2084 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2085 
2086 	/* Calculate HDMI div */
2087 	/*
2088 	 * FIXME: tie the following calculation into
2089 	 * i9xx_crtc_compute_clock
2090 	 */
2091 	if (!bxt_find_best_dpll(crtc_state, clk_div)) {
2092 		drm_dbg(&i915->drm, "no PLL dividers found for clock %d pipe %c\n",
2093 			crtc_state->port_clock,
2094 			pipe_name(crtc->pipe));
2095 		return -EINVAL;
2096 	}
2097 
2098 	drm_WARN_ON(&i915->drm, clk_div->m1 != 2);
2099 
2100 	return 0;
2101 }
2102 
2103 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
2104 				    struct dpll *clk_div)
2105 {
2106 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2107 	int i;
2108 
2109 	*clk_div = bxt_dp_clk_val[0];
2110 	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
2111 		if (crtc_state->port_clock == bxt_dp_clk_val[i].dot) {
2112 			*clk_div = bxt_dp_clk_val[i];
2113 			break;
2114 		}
2115 	}
2116 
2117 	chv_calc_dpll_params(i915->dpll.ref_clks.nssc, clk_div);
2118 
2119 	drm_WARN_ON(&i915->drm, clk_div->vco == 0 ||
2120 		    clk_div->dot != crtc_state->port_clock);
2121 }
2122 
2123 static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
2124 				     const struct dpll *clk_div)
2125 {
2126 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2127 	struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
2128 	int clock = crtc_state->port_clock;
2129 	int vco = clk_div->vco;
2130 	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
2131 	u32 lanestagger;
2132 
2133 	if (vco >= 6200000 && vco <= 6700000) {
2134 		prop_coef = 4;
2135 		int_coef = 9;
2136 		gain_ctl = 3;
2137 		targ_cnt = 8;
2138 	} else if ((vco > 5400000 && vco < 6200000) ||
2139 			(vco >= 4800000 && vco < 5400000)) {
2140 		prop_coef = 5;
2141 		int_coef = 11;
2142 		gain_ctl = 3;
2143 		targ_cnt = 9;
2144 	} else if (vco == 5400000) {
2145 		prop_coef = 3;
2146 		int_coef = 8;
2147 		gain_ctl = 1;
2148 		targ_cnt = 9;
2149 	} else {
2150 		drm_err(&i915->drm, "Invalid VCO\n");
2151 		return -EINVAL;
2152 	}
2153 
2154 	if (clock > 270000)
2155 		lanestagger = 0x18;
2156 	else if (clock > 135000)
2157 		lanestagger = 0x0d;
2158 	else if (clock > 67000)
2159 		lanestagger = 0x07;
2160 	else if (clock > 33000)
2161 		lanestagger = 0x04;
2162 	else
2163 		lanestagger = 0x02;
2164 
2165 	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
2166 	dpll_hw_state->pll0 = PORT_PLL_M2_INT(clk_div->m2 >> 22);
2167 	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
2168 	dpll_hw_state->pll2 = PORT_PLL_M2_FRAC(clk_div->m2 & 0x3fffff);
2169 
2170 	if (clk_div->m2 & 0x3fffff)
2171 		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
2172 
2173 	dpll_hw_state->pll6 = PORT_PLL_PROP_COEFF(prop_coef) |
2174 		PORT_PLL_INT_COEFF(int_coef) |
2175 		PORT_PLL_GAIN_CTL(gain_ctl);
2176 
2177 	dpll_hw_state->pll8 = PORT_PLL_TARGET_CNT(targ_cnt);
2178 
2179 	dpll_hw_state->pll9 = PORT_PLL_LOCK_THRESHOLD(5);
2180 
2181 	dpll_hw_state->pll10 = PORT_PLL_DCO_AMP(15) |
2182 		PORT_PLL_DCO_AMP_OVR_EN_H;
2183 
2184 	dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
2185 
2186 	dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
2187 
2188 	return 0;
2189 }
2190 
2191 static int
2192 bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2193 {
2194 	struct dpll clk_div = {};
2195 
2196 	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
2197 
2198 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2199 }
2200 
2201 static int
2202 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2203 {
2204 	struct dpll clk_div = {};
2205 
2206 	bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
2207 
2208 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2209 }
2210 
2211 static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
2212 				const struct intel_shared_dpll *pll,
2213 				const struct intel_dpll_hw_state *pll_state)
2214 {
2215 	struct dpll clock;
2216 
2217 	clock.m1 = 2;
2218 	clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, pll_state->pll0) << 22;
2219 	if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
2220 		clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK, pll_state->pll2);
2221 	clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, pll_state->pll1);
2222 	clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, pll_state->ebb0);
2223 	clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, pll_state->ebb0);
2224 
2225 	return chv_calc_dpll_params(i915->dpll.ref_clks.nssc, &clock);
2226 }
2227 
2228 static int bxt_get_dpll(struct intel_atomic_state *state,
2229 			struct intel_crtc *crtc,
2230 			struct intel_encoder *encoder)
2231 {
2232 	struct intel_crtc_state *crtc_state =
2233 		intel_atomic_get_new_crtc_state(state, crtc);
2234 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2235 	struct intel_shared_dpll *pll;
2236 	enum intel_dpll_id id;
2237 	int ret;
2238 
2239 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
2240 		ret = bxt_ddi_hdmi_set_dpll_hw_state(crtc_state);
2241 	else if (intel_crtc_has_dp_encoder(crtc_state))
2242 		ret = bxt_ddi_dp_set_dpll_hw_state(crtc_state);
2243 	else
2244 		ret = -EINVAL;
2245 	if (ret)
2246 		return ret;
2247 
2248 	/* 1:1 mapping between ports and PLLs */
2249 	id = (enum intel_dpll_id) encoder->port;
2250 	pll = intel_get_shared_dpll_by_id(dev_priv, id);
2251 
2252 	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
2253 		    crtc->base.base.id, crtc->base.name, pll->info->name);
2254 
2255 	intel_reference_shared_dpll(state, crtc,
2256 				    pll, &crtc_state->dpll_hw_state);
2257 
2258 	crtc_state->shared_dpll = pll;
2259 
2260 	return 0;
2261 }
2262 
2263 static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
2264 {
2265 	i915->dpll.ref_clks.ssc = 100000;
2266 	i915->dpll.ref_clks.nssc = 100000;
2267 	/* DSI non-SSC ref 19.2MHz */
2268 }
2269 
2270 static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
2271 			      const struct intel_dpll_hw_state *hw_state)
2272 {
2273 	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
2274 		    "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
2275 		    "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
2276 		    hw_state->ebb0,
2277 		    hw_state->ebb4,
2278 		    hw_state->pll0,
2279 		    hw_state->pll1,
2280 		    hw_state->pll2,
2281 		    hw_state->pll3,
2282 		    hw_state->pll6,
2283 		    hw_state->pll8,
2284 		    hw_state->pll9,
2285 		    hw_state->pll10,
2286 		    hw_state->pcsdw12);
2287 }
2288 
/* Per-PLL operations for the BXT port PLLs. */
static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
	.enable = bxt_ddi_pll_enable,
	.disable = bxt_ddi_pll_disable,
	.get_hw_state = bxt_ddi_pll_get_hw_state,
	.get_freq = bxt_ddi_pll_get_freq,
};
2295 
/*
 * BXT port PLL pool; PLL IDs line up with ports so the driver can use a
 * fixed 1:1 port->PLL mapping (see bxt_get_dpll()).
 */
static const struct dpll_info bxt_plls[] = {
	{ "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
	{ "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
	{ "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
	{ },
};
2302 
/* Top-level DPLL manager hooks for BXT/GLK. */
static const struct intel_dpll_mgr bxt_pll_mgr = {
	.dpll_info = bxt_plls,
	.get_dplls = bxt_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = bxt_update_dpll_ref_clks,
	.dump_hw_state = bxt_dump_hw_state,
};
2310 
/*
 * Decompose an overall WRPLL post divider @bestdiv into the P/Q/K
 * multiplier triple such that pdiv * qdiv * kdiv == bestdiv.
 *
 * Note: the outputs are only written for the divider values the caller
 * can actually produce (the dividers[] table in icl_calc_wrpll()); an
 * unexpected even value leaves them untouched.
 */
static void icl_wrpll_get_multipliers(int bestdiv, int *pdiv,
				      int *qdiv, int *kdiv)
{
	if (bestdiv % 2) {
		/* odd dividers: 3/5/7 map directly, 9/15/21 use kdiv = 3 */
		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
			*pdiv = bestdiv;
			*kdiv = 1;
		} else {
			*pdiv = bestdiv / 3;
			*kdiv = 3;
		}
		*qdiv = 1;
		return;
	}

	/* even dividers — check order matters for values like 20 (4|5) */
	if (bestdiv == 2) {
		*pdiv = 2;
		*qdiv = 1;
		*kdiv = 1;
	} else if (bestdiv % 4 == 0) {
		*pdiv = 2;
		*qdiv = bestdiv / 4;
		*kdiv = 2;
	} else if (bestdiv % 6 == 0) {
		*pdiv = 3;
		*qdiv = bestdiv / 6;
		*kdiv = 2;
	} else if (bestdiv % 5 == 0) {
		*pdiv = 5;
		*qdiv = bestdiv / 10;
		*kdiv = 2;
	} else if (bestdiv % 14 == 0) {
		*pdiv = 7;
		*qdiv = bestdiv / 14;
		*kdiv = 2;
	}
}
2349 
2350 static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
2351 				      u32 dco_freq, u32 ref_freq,
2352 				      int pdiv, int qdiv, int kdiv)
2353 {
2354 	u32 dco;
2355 
2356 	switch (kdiv) {
2357 	case 1:
2358 		params->kdiv = 1;
2359 		break;
2360 	case 2:
2361 		params->kdiv = 2;
2362 		break;
2363 	case 3:
2364 		params->kdiv = 4;
2365 		break;
2366 	default:
2367 		WARN(1, "Incorrect KDiv\n");
2368 	}
2369 
2370 	switch (pdiv) {
2371 	case 2:
2372 		params->pdiv = 1;
2373 		break;
2374 	case 3:
2375 		params->pdiv = 2;
2376 		break;
2377 	case 5:
2378 		params->pdiv = 4;
2379 		break;
2380 	case 7:
2381 		params->pdiv = 8;
2382 		break;
2383 	default:
2384 		WARN(1, "Incorrect PDiv\n");
2385 	}
2386 
2387 	WARN_ON(kdiv != 2 && qdiv != 1);
2388 
2389 	params->qdiv_ratio = qdiv;
2390 	params->qdiv_mode = (qdiv == 1) ? 0 : 1;
2391 
2392 	dco = div_u64((u64)dco_freq << 15, ref_freq);
2393 
2394 	params->dco_integer = dco >> 15;
2395 	params->dco_fraction = dco & 0x7fff;
2396 }
2397 
2398 /*
2399  * Display WA #22010492432: ehl, tgl, adl-p
2400  * Program half of the nominal DCO divider fraction value.
2401  */
2402 static bool
2403 ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
2404 {
2405 	return ((IS_PLATFORM(i915, INTEL_ELKHARTLAKE) &&
2406 		 IS_JSL_EHL_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
2407 		 IS_TIGERLAKE(i915) || IS_ALDERLAKE_P(i915)) &&
2408 		 i915->dpll.ref_clks.nssc == 38400;
2409 }
2410 
/* One pre-computed combo PHY PLL configuration, keyed by port clock. */
struct icl_combo_pll_params {
	int clock;			/* port clock in kHz */
	struct skl_wrpll_params wrpll;	/* register-ready divider values */
};
2415 
2416 /*
2417  * These values alrea already adjusted: they're the bits we write to the
2418  * registers, not the logical values.
2419  */
2420 static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
2421 	{ 540000,
2422 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
2423 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2424 	{ 270000,
2425 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
2426 	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2427 	{ 162000,
2428 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
2429 	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2430 	{ 324000,
2431 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
2432 	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2433 	{ 216000,
2434 	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2435 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2436 	{ 432000,
2437 	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2438 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2439 	{ 648000,
2440 	  { .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
2441 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2442 	{ 810000,
2443 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
2444 	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2445 };
2446 
2447 
/* Also used for 38.4 MHz values (the DPLL halves a 38.4 MHz reference). */
static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
	{ 540000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 270000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 162000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 324000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 216000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
	{ 432000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 648000,
	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 810000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};
2475 
/* ICL Thunderbolt PLL parameters for a 24 MHz reference. */
static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
	.dco_integer = 0x151, .dco_fraction = 0x4000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};
2480 
/* ICL Thunderbolt PLL parameters for a 19.2 MHz (or 38.4 MHz) reference. */
static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};
2485 
/* TGL Thunderbolt PLL parameters for a 19.2 MHz (or 38.4 MHz) reference. */
static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x54, .dco_fraction = 0x3000,
	/* the following params are unused */
	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
};
2491 
/* TGL Thunderbolt PLL parameters for a 24 MHz reference. */
static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
	.dco_integer = 0x43, .dco_fraction = 0x4000,
	/* the following params are unused */
};
2496 
2497 static int icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2498 				 struct skl_wrpll_params *pll_params)
2499 {
2500 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2501 	const struct icl_combo_pll_params *params =
2502 		dev_priv->dpll.ref_clks.nssc == 24000 ?
2503 		icl_dp_combo_pll_24MHz_values :
2504 		icl_dp_combo_pll_19_2MHz_values;
2505 	int clock = crtc_state->port_clock;
2506 	int i;
2507 
2508 	for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
2509 		if (clock == params[i].clock) {
2510 			*pll_params = params[i].wrpll;
2511 			return 0;
2512 		}
2513 	}
2514 
2515 	MISSING_CASE(clock);
2516 	return -EINVAL;
2517 }
2518 
2519 static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
2520 			    struct skl_wrpll_params *pll_params)
2521 {
2522 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2523 
2524 	if (DISPLAY_VER(dev_priv) >= 12) {
2525 		switch (dev_priv->dpll.ref_clks.nssc) {
2526 		default:
2527 			MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
2528 			fallthrough;
2529 		case 19200:
2530 		case 38400:
2531 			*pll_params = tgl_tbt_pll_19_2MHz_values;
2532 			break;
2533 		case 24000:
2534 			*pll_params = tgl_tbt_pll_24MHz_values;
2535 			break;
2536 		}
2537 	} else {
2538 		switch (dev_priv->dpll.ref_clks.nssc) {
2539 		default:
2540 			MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
2541 			fallthrough;
2542 		case 19200:
2543 		case 38400:
2544 			*pll_params = icl_tbt_pll_19_2MHz_values;
2545 			break;
2546 		case 24000:
2547 			*pll_params = icl_tbt_pll_24MHz_values;
2548 			break;
2549 		}
2550 	}
2551 
2552 	return 0;
2553 }
2554 
/* .get_freq hook for the TBT PLL; intentionally has no single answer. */
static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
				    const struct intel_shared_dpll *pll,
				    const struct intel_dpll_hw_state *pll_state)
{
	/*
	 * The PLL outputs multiple frequencies at the same time, selection is
	 * made at DDI clock mux level. Reaching this hook is therefore a
	 * driver bug, hence the unconditional WARN.
	 */
	drm_WARN_ON(&i915->drm, 1);

	return 0;
}
2567 
2568 static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
2569 {
2570 	int ref_clock = i915->dpll.ref_clks.nssc;
2571 
2572 	/*
2573 	 * For ICL+, the spec states: if reference frequency is 38.4,
2574 	 * use 19.2 because the DPLL automatically divides that by 2.
2575 	 */
2576 	if (ref_clock == 38400)
2577 		ref_clock = 19200;
2578 
2579 	return ref_clock;
2580 }
2581 
2582 static int
2583 icl_calc_wrpll(struct intel_crtc_state *crtc_state,
2584 	       struct skl_wrpll_params *wrpll_params)
2585 {
2586 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2587 	int ref_clock = icl_wrpll_ref_clock(i915);
2588 	u32 afe_clock = crtc_state->port_clock * 5;
2589 	u32 dco_min = 7998000;
2590 	u32 dco_max = 10000000;
2591 	u32 dco_mid = (dco_min + dco_max) / 2;
2592 	static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
2593 					 18, 20, 24, 28, 30, 32,  36,  40,
2594 					 42, 44, 48, 50, 52, 54,  56,  60,
2595 					 64, 66, 68, 70, 72, 76,  78,  80,
2596 					 84, 88, 90, 92, 96, 98, 100, 102,
2597 					  3,  5,  7,  9, 15, 21 };
2598 	u32 dco, best_dco = 0, dco_centrality = 0;
2599 	u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2600 	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2601 
2602 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2603 		dco = afe_clock * dividers[d];
2604 
2605 		if (dco <= dco_max && dco >= dco_min) {
2606 			dco_centrality = abs(dco - dco_mid);
2607 
2608 			if (dco_centrality < best_dco_centrality) {
2609 				best_dco_centrality = dco_centrality;
2610 				best_div = dividers[d];
2611 				best_dco = dco;
2612 			}
2613 		}
2614 	}
2615 
2616 	if (best_div == 0)
2617 		return -EINVAL;
2618 
2619 	icl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2620 	icl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
2621 				  pdiv, qdiv, kdiv);
2622 
2623 	return 0;
2624 }
2625 
/*
 * Compute the port clock (in kHz) produced by an ICL+ combo PHY PLL
 * from its CFGCR register state. Returns 0 (with a WARN) if the decoded
 * dividers are invalid.
 */
static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
				      const struct intel_shared_dpll *pll,
				      const struct intel_dpll_hw_state *pll_state)
{
	int ref_clock = icl_wrpll_ref_clock(i915);
	u32 dco_fraction;
	u32 p0, p1, p2, dco_freq;

	/* p0/p1/p2 first hold the raw register fields, then the decoded
	 * P/Q/K divider values. */
	p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
	p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;

	/* Q divider only applies when qdiv_mode is enabled. */
	if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
		p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
			DPLL_CFGCR1_QDIV_RATIO_SHIFT;
	else
		p1 = 1;

	switch (p0) {
	case DPLL_CFGCR1_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR1_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR1_PDIV_5:
		p0 = 5;
		break;
	case DPLL_CFGCR1_PDIV_7:
		p0 = 7;
		break;
	}

	switch (p2) {
	case DPLL_CFGCR1_KDIV_1:
		p2 = 1;
		break;
	case DPLL_CFGCR1_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR1_KDIV_3:
		p2 = 3;
		break;
	}

	dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
		   ref_clock;

	dco_fraction = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
		       DPLL_CFGCR0_DCO_FRACTION_SHIFT;

	/* Undo the halved DCO fraction of Display WA #22010492432. */
	if (ehl_combo_pll_div_frac_wa_needed(i915))
		dco_fraction *= 2;

	/* Fraction is in 15-bit fixed point (1/0x8000 units). */
	dco_freq += (dco_fraction * ref_clock) / 0x8000;

	/* Unrecognized register encodings leave a divider at 0. */
	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	/* AFE clock = dco / (p0 * p1 * p2); port clock = AFE clock / 5 */
	return dco_freq / (p0 * p1 * p2 * 5);
}
2686 
2687 static void icl_calc_dpll_state(struct drm_i915_private *i915,
2688 				const struct skl_wrpll_params *pll_params,
2689 				struct intel_dpll_hw_state *pll_state)
2690 {
2691 	u32 dco_fraction = pll_params->dco_fraction;
2692 
2693 	if (ehl_combo_pll_div_frac_wa_needed(i915))
2694 		dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);
2695 
2696 	pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
2697 			    pll_params->dco_integer;
2698 
2699 	pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
2700 			    DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
2701 			    DPLL_CFGCR1_KDIV(pll_params->kdiv) |
2702 			    DPLL_CFGCR1_PDIV(pll_params->pdiv);
2703 
2704 	if (DISPLAY_VER(i915) >= 12)
2705 		pll_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
2706 	else
2707 		pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
2708 
2709 	if (i915->vbt.override_afc_startup)
2710 		pll_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->vbt.override_afc_startup_val);
2711 }
2712 
/*
 * Search for MG/DKL PHY PLL dividers (div1 * div2) that put the DCO
 * frequency (clock * 5 * div1 * div2) inside the valid band.
 *
 * On success returns 0, stores the DCO frequency in @target_dco_khz and
 * programs the refclk/clktop fields of @state. Returns -EINVAL if no
 * divider combination lands in range.
 */
static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
				    u32 *target_dco_khz,
				    struct intel_dpll_hw_state *state,
				    bool is_dkl)
{
	/* div1 candidates, tried largest first */
	static const u8 div1_vals[] = { 7, 5, 3, 2 };
	u32 dco_min_freq, dco_max_freq;
	unsigned int i;
	int div2;

	/* DP pins the DCO to exactly 8.1 GHz; HDMI allows a band. */
	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
	dco_max_freq = is_dp ? 8100000 : 10000000;

	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
		int div1 = div1_vals[i];

		for (div2 = 10; div2 > 0; div2--) {
			int dco = div1 * div2 * clock_khz * 5;
			int a_divratio, tlinedrv, inputsel;
			u32 hsdiv;

			if (dco < dco_min_freq || dco > dco_max_freq)
				continue;

			if (div2 >= 2) {
				/*
				 * Note: a_divratio not matching TGL BSpec
				 * algorithm but matching hardcoded values and
				 * working on HW for DP alt-mode at least
				 */
				a_divratio = is_dp ? 10 : 5;
				tlinedrv = is_dkl ? 1 : 2;
			} else {
				a_divratio = 5;
				tlinedrv = 0;
			}
			inputsel = is_dp ? 0 : 1;

			/* Translate div1 into its register encoding. */
			switch (div1) {
			default:
				MISSING_CASE(div1);
				fallthrough;
			case 2:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
				break;
			case 3:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
				break;
			case 5:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
				break;
			case 7:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
				break;
			}

			*target_dco_khz = dco;

			state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);

			state->mg_clktop2_coreclkctl1 =
				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);

			state->mg_clktop2_hsclkctl =
				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
				hsdiv |
				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);

			return 0;
		}
	}

	return -EINVAL;
}
2788 
2789 /*
2790  * The specification for this function uses real numbers, so the math had to be
2791  * adapted to integer-only calculation, that's why it looks so different.
2792  */
/*
 * Compute the full MG (ICL) or DKL (TGL+) PHY PLL register state for
 * the given port clock.
 *
 * Returns 0 with @pll_state filled in, or a negative error code if no
 * suitable dividers exist or the reference clock is unsupported.
 * NOTE(review): use_ssc is currently hard-coded to false, so all the
 * ssc_* terms end up zero except ssc_steplog.
 */
static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
				 struct intel_dpll_hw_state *pll_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	int refclk_khz = dev_priv->dpll.ref_clks.nssc;
	int clock = crtc_state->port_clock;
	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
	u32 iref_ndiv, iref_trim, iref_pulse_w;
	u32 prop_coeff, int_coeff;
	u32 tdc_targetcnt, feedfwgain;
	u64 ssc_stepsize, ssc_steplen, ssc_steplog;
	u64 tmp;
	bool use_ssc = false;
	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
	bool is_dkl = DISPLAY_VER(dev_priv) >= 12;
	int ret;

	ret = icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
				       pll_state, is_dkl);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "Failed to find divisors for clock %d\n", clock);
		return ret;
	}

	/*
	 * Feedback divider: try the m1 pre-divider of 2 first; if the
	 * integer part overflows 8 bits, retry with 4 (MG only — DKL has
	 * a fixed pre-divider).
	 */
	m1div = 2;
	m2div_int = dco_khz / (refclk_khz * m1div);
	if (m2div_int > 255) {
		if (!is_dkl) {
			m1div = 4;
			m2div_int = dco_khz / (refclk_khz * m1div);
		}

		if (m2div_int > 255) {
			drm_dbg_kms(&dev_priv->drm,
				    "Failed to find mdiv for clock %d\n",
				    clock);
			return -EINVAL;
		}
	}
	m2div_rem = dco_khz % (refclk_khz * m1div);

	/* Fractional part of m2 in .22 binary fixed point. */
	tmp = (u64)m2div_rem * (1 << 22);
	do_div(tmp, refclk_khz * m1div);
	m2div_frac = tmp;

	/* Reference input divider/trim settings per reference clock. */
	switch (refclk_khz) {
	case 19200:
		iref_ndiv = 1;
		iref_trim = 28;
		iref_pulse_w = 1;
		break;
	case 24000:
		iref_ndiv = 1;
		iref_trim = 25;
		iref_pulse_w = 2;
		break;
	case 38400:
		iref_ndiv = 2;
		iref_trim = 28;
		iref_pulse_w = 1;
		break;
	default:
		MISSING_CASE(refclk_khz);
		return -EINVAL;
	}

	/*
	 * tdc_res = 0.000003
	 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
	 *
	 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
	 * was supposed to be a division, but we rearranged the operations of
	 * the formula to avoid early divisions so we don't multiply the
	 * rounding errors.
	 *
	 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
	 * we also rearrange to work with integers.
	 *
	 * The 0.5 transformed to 5 results in a multiplication by 10 and the
	 * last division by 10.
	 */
	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;

	/*
	 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
	 * 32 bits. That's not a problem since we round the division down
	 * anyway.
	 */
	feedfwgain = (use_ssc || m2div_rem > 0) ?
		m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;

	/* Loop filter coefficients depend on the DCO frequency band. */
	if (dco_khz >= 9000000) {
		prop_coeff = 5;
		int_coeff = 10;
	} else {
		prop_coeff = 4;
		int_coeff = 8;
	}

	if (use_ssc) {
		tmp = mul_u32_u32(dco_khz, 47 * 32);
		do_div(tmp, refclk_khz * m1div * 10000);
		ssc_stepsize = tmp;

		tmp = mul_u32_u32(dco_khz, 1000);
		ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
	} else {
		ssc_stepsize = 0;
		ssc_steplen = 0;
	}
	ssc_steplog = 4;

	/* write pll_state calculations */
	if (is_dkl) {
		pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
					 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
					 DKL_PLL_DIV0_FBPREDIV(m1div) |
					 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
		if (dev_priv->vbt.override_afc_startup) {
			u8 val = dev_priv->vbt.override_afc_startup_val;

			pll_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val);
		}

		pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
					 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);

		pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
					DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
					DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
					(use_ssc ? DKL_PLL_SSC_EN : 0);

		pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
					  DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);

		pll_state->mg_pll_tdc_coldst_bias =
				DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
				DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);

	} else {
		pll_state->mg_pll_div0 =
			(m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
			MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
			MG_PLL_DIV0_FBDIV_INT(m2div_int);

		pll_state->mg_pll_div1 =
			MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
			MG_PLL_DIV1_DITHER_DIV_2 |
			MG_PLL_DIV1_NDIVRATIO(1) |
			MG_PLL_DIV1_FBPREDIV(m1div);

		pll_state->mg_pll_lf =
			MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
			MG_PLL_LF_AFCCNTSEL_512 |
			MG_PLL_LF_GAINCTRL(1) |
			MG_PLL_LF_INT_COEFF(int_coeff) |
			MG_PLL_LF_PROP_COEFF(prop_coeff);

		pll_state->mg_pll_frac_lock =
			MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
			MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
			MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
			MG_PLL_FRAC_LOCK_DCODITHEREN |
			MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
		if (use_ssc || m2div_rem > 0)
			pll_state->mg_pll_frac_lock |=
				MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;

		pll_state->mg_pll_ssc =
			(use_ssc ? MG_PLL_SSC_EN : 0) |
			MG_PLL_SSC_TYPE(2) |
			MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
			MG_PLL_SSC_STEPNUM(ssc_steplog) |
			MG_PLL_SSC_FLLEN |
			MG_PLL_SSC_STEPSIZE(ssc_stepsize);

		pll_state->mg_pll_tdc_coldst_bias =
			MG_PLL_TDC_COLDST_COLDSTART |
			MG_PLL_TDC_COLDST_IREFINT_EN |
			MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
			MG_PLL_TDC_TDCOVCCORR_EN |
			MG_PLL_TDC_TDCSEL(3);

		pll_state->mg_pll_bias =
			MG_PLL_BIAS_BIAS_GB_SEL(3) |
			MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
			MG_PLL_BIAS_BIAS_BONUS(10) |
			MG_PLL_BIAS_BIASCAL_EN |
			MG_PLL_BIAS_CTRIM(12) |
			MG_PLL_BIAS_VREF_RDAC(4) |
			MG_PLL_BIAS_IREFTRIM(iref_trim);

		/*
		 * With a 38.4 MHz reference most of the tdc_coldst_bias
		 * and all of the bias register bits are ignored by the
		 * state checker (masked off both here and at readout).
		 */
		if (refclk_khz == 38400) {
			pll_state->mg_pll_tdc_coldst_bias_mask =
				MG_PLL_TDC_COLDST_COLDSTART;
			pll_state->mg_pll_bias_mask = 0;
		} else {
			pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
			pll_state->mg_pll_bias_mask = -1U;
		}

		pll_state->mg_pll_tdc_coldst_bias &=
			pll_state->mg_pll_tdc_coldst_bias_mask;
		pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
	}

	return 0;
}
3002 
/*
 * Compute the port clock (in kHz) produced by an MG (ICL) or DKL (TGL+)
 * PHY PLL from its register state. Returns 0 (with a MISSING_CASE) on
 * an unrecognized hsdiv encoding.
 */
static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv,
				   const struct intel_shared_dpll *pll,
				   const struct intel_dpll_hw_state *pll_state)
{
	u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
	u64 tmp;

	ref_clock = dev_priv->dpll.ref_clks.nssc;

	/* The feedback divider fields live in different registers on DKL. */
	if (DISPLAY_VER(dev_priv) >= 12) {
		m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
		m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
		m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;

		if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
			m2_frac = pll_state->mg_pll_bias &
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK;
			m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
		} else {
			m2_frac = 0;
		}
	} else {
		m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
		m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;

		if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
			m2_frac = pll_state->mg_pll_div0 &
				  MG_PLL_DIV0_FBDIV_FRAC_MASK;
			m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
		} else {
			m2_frac = 0;
		}
	}

	switch (pll_state->mg_clktop2_hsclkctl &
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
		div1 = 2;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
		div1 = 3;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
		div1 = 5;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
		div1 = 7;
		break;
	default:
		MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
		return 0;
	}

	div2 = (pll_state->mg_clktop2_hsclkctl &
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;

	/* div2 value of 0 is same as 1 means no div */
	if (div2 == 0)
		div2 = 1;

	/*
	 * Adjust the original formula to delay the division by 2^22 in order to
	 * minimize possible rounding errors.
	 */
	tmp = (u64)m1 * m2_int * ref_clock +
	      (((u64)m1 * m2_frac * ref_clock) >> 22);
	tmp = div_u64(tmp, 5 * div1 * div2);

	return tmp;
}
3074 
3075 /**
3076  * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
3077  * @crtc_state: state for the CRTC to select the DPLL for
3078  * @port_dpll_id: the active @port_dpll_id to select
3079  *
3080  * Select the given @port_dpll_id instance from the DPLLs reserved for the
3081  * CRTC.
3082  */
3083 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
3084 			      enum icl_port_dpll_id port_dpll_id)
3085 {
3086 	struct icl_port_dpll *port_dpll =
3087 		&crtc_state->icl_port_dplls[port_dpll_id];
3088 
3089 	crtc_state->shared_dpll = port_dpll->pll;
3090 	crtc_state->dpll_hw_state = port_dpll->hw_state;
3091 }
3092 
3093 static void icl_update_active_dpll(struct intel_atomic_state *state,
3094 				   struct intel_crtc *crtc,
3095 				   struct intel_encoder *encoder)
3096 {
3097 	struct intel_crtc_state *crtc_state =
3098 		intel_atomic_get_new_crtc_state(state, crtc);
3099 	struct intel_digital_port *primary_port;
3100 	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3101 
3102 	primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3103 		enc_to_mst(encoder)->primary :
3104 		enc_to_dig_port(encoder);
3105 
3106 	if (primary_port &&
3107 	    (intel_tc_port_in_dp_alt_mode(primary_port) ||
3108 	     intel_tc_port_in_legacy_mode(primary_port)))
3109 		port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3110 
3111 	icl_set_active_port_dpll(crtc_state, port_dpll_id);
3112 }
3113 
3114 static u32 intel_get_hti_plls(struct drm_i915_private *i915)
3115 {
3116 	if (!(i915->hti_state & HDPORT_ENABLED))
3117 		return 0;
3118 
3119 	return REG_FIELD_GET(HDPORT_DPLL_USED_MASK, i915->hti_state);
3120 }
3121 
/*
 * Reserve a combo PHY DPLL for @crtc driven by @encoder: compute the PLL
 * parameters for the output type, pick a free DPLL from the
 * platform-specific pool and make it the CRTC's active DPLL.
 *
 * Returns 0 on success or a negative error code.
 */
static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
				  struct intel_crtc *crtc,
				  struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct skl_wrpll_params pll_params = { };
	struct icl_port_dpll *port_dpll =
		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum port port = encoder->port;
	unsigned long dpll_mask;
	int ret;

	/* HDMI/DSI need a computed WRPLL config, DP uses combo PLL parameters. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
		ret = icl_calc_wrpll(crtc_state, &pll_params);
	else
		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);

	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "Could not calculate combo PHY PLL state.\n");
		return ret;
	}

	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);

	/* The pool of selectable DPLLs varies per platform (and per port). */
	if (IS_ALDERLAKE_S(dev_priv)) {
		dpll_mask =
			BIT(DPLL_ID_DG1_DPLL3) |
			BIT(DPLL_ID_DG1_DPLL2) |
			BIT(DPLL_ID_ICL_DPLL1) |
			BIT(DPLL_ID_ICL_DPLL0);
	} else if (IS_DG1(dev_priv)) {
		if (port == PORT_D || port == PORT_E) {
			dpll_mask =
				BIT(DPLL_ID_DG1_DPLL2) |
				BIT(DPLL_ID_DG1_DPLL3);
		} else {
			dpll_mask =
				BIT(DPLL_ID_DG1_DPLL0) |
				BIT(DPLL_ID_DG1_DPLL1);
		}
	} else if (IS_ROCKETLAKE(dev_priv)) {
		dpll_mask =
			BIT(DPLL_ID_EHL_DPLL4) |
			BIT(DPLL_ID_ICL_DPLL1) |
			BIT(DPLL_ID_ICL_DPLL0);
	} else if (IS_JSL_EHL(dev_priv) && port != PORT_A) {
		dpll_mask =
			BIT(DPLL_ID_EHL_DPLL4) |
			BIT(DPLL_ID_ICL_DPLL1) |
			BIT(DPLL_ID_ICL_DPLL0);
	} else {
		dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
	}

	/* Eliminate DPLLs from consideration if reserved by HTI */
	dpll_mask &= ~intel_get_hti_plls(dev_priv);

	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						dpll_mask);
	if (!port_dpll->pll) {
		drm_dbg_kms(&dev_priv->drm,
			    "No combo PHY PLL found for [ENCODER:%d:%s]\n",
			    encoder->base.base.id, encoder->base.name);
		return -EINVAL;
	}

	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);

	icl_update_active_dpll(state, crtc, encoder);

	return 0;
}
3200 
/*
 * Reserve the PLLs for a Type-C port: the TBT PLL as the default and the
 * port's MG/DKL PHY PLL as the alternate.  The active one is then chosen
 * by icl_update_active_dpll() based on the port's Type-C mode.  On
 * failure the TBT PLL reference taken here is dropped again.
 *
 * Returns 0 on success or a negative error code.
 */
static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
				struct intel_crtc *crtc,
				struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct skl_wrpll_params pll_params = { };
	struct icl_port_dpll *port_dpll;
	enum intel_dpll_id dpll_id;
	int ret;

	/* First the TBT PLL, which is the only option for the default slot. */
	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	ret = icl_calc_tbt_pll(crtc_state, &pll_params);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "Could not calculate TBT PLL state.\n");
		return ret;
	}

	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);

	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						BIT(DPLL_ID_ICL_TBTPLL));
	if (!port_dpll->pll) {
		drm_dbg_kms(&dev_priv->drm, "No TBT-ALT PLL found\n");
		return -EINVAL;
	}
	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);


	/* Then the MG PHY PLL dedicated to this port's TC lane. */
	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
	ret = icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "Could not calculate MG PHY PLL state.\n");
		goto err_unreference_tbt_pll;
	}

	dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
							 encoder->port));
	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						BIT(dpll_id));
	if (!port_dpll->pll) {
		ret = -EINVAL;
		drm_dbg_kms(&dev_priv->drm, "No MG PHY PLL found\n");
		goto err_unreference_tbt_pll;
	}
	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);

	icl_update_active_dpll(state, crtc, encoder);

	return 0;

err_unreference_tbt_pll:
	/* Roll back the TBT PLL reference taken above. */
	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	intel_unreference_shared_dpll(state, crtc, port_dpll->pll);

	return ret;
}
3265 
3266 static int icl_get_dplls(struct intel_atomic_state *state,
3267 			 struct intel_crtc *crtc,
3268 			 struct intel_encoder *encoder)
3269 {
3270 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3271 	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3272 
3273 	if (intel_phy_is_combo(dev_priv, phy))
3274 		return icl_get_combo_phy_dpll(state, crtc, encoder);
3275 	else if (intel_phy_is_tc(dev_priv, phy))
3276 		return icl_get_tc_phy_dplls(state, crtc, encoder);
3277 
3278 	MISSING_CASE(phy);
3279 
3280 	return -EINVAL;
3281 }
3282 
3283 static void icl_put_dplls(struct intel_atomic_state *state,
3284 			  struct intel_crtc *crtc)
3285 {
3286 	const struct intel_crtc_state *old_crtc_state =
3287 		intel_atomic_get_old_crtc_state(state, crtc);
3288 	struct intel_crtc_state *new_crtc_state =
3289 		intel_atomic_get_new_crtc_state(state, crtc);
3290 	enum icl_port_dpll_id id;
3291 
3292 	new_crtc_state->shared_dpll = NULL;
3293 
3294 	for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3295 		const struct icl_port_dpll *old_port_dpll =
3296 			&old_crtc_state->icl_port_dplls[id];
3297 		struct icl_port_dpll *new_port_dpll =
3298 			&new_crtc_state->icl_port_dplls[id];
3299 
3300 		new_port_dpll->pll = NULL;
3301 
3302 		if (!old_port_dpll->pll)
3303 			continue;
3304 
3305 		intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3306 	}
3307 }
3308 
/*
 * Read out the current MG PHY PLL (ICL) register state into @hw_state.
 *
 * Each value is masked down to the fields the driver actually programs
 * (see icl_mg_pll_write()), so that state verification only compares
 * bits we own.  Returns false if the PLL is disabled or display power
 * is unavailable.
 */
static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll,
				struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
						  MG_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_pll_div0 = intel_de_read(dev_priv, MG_PLL_DIV0(tc_port));
	hw_state->mg_pll_div1 = intel_de_read(dev_priv, MG_PLL_DIV1(tc_port));
	hw_state->mg_pll_lf = intel_de_read(dev_priv, MG_PLL_LF(tc_port));
	hw_state->mg_pll_frac_lock = intel_de_read(dev_priv,
						   MG_PLL_FRAC_LOCK(tc_port));
	hw_state->mg_pll_ssc = intel_de_read(dev_priv, MG_PLL_SSC(tc_port));

	hw_state->mg_pll_bias = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias =
		intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));

	/* Mirror the refclk-dependent masking done in icl_calc_mg_pll_state(). */
	if (dev_priv->dpll.ref_clks.nssc == 38400) {
		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
		hw_state->mg_pll_bias_mask = 0;
	} else {
		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
		hw_state->mg_pll_bias_mask = -1U;
	}

	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3374 
/*
 * Read out the current DKL PHY PLL (TGL+) register state into @hw_state.
 *
 * Values are masked to the fields programmed in dkl_pll_write() so that
 * state verification only compares bits we own.  Returns false if the
 * PLL is disabled or display power is unavailable.
 */
static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, intel_tc_pll_enable_reg(dev_priv, pll));
	if (!(val & PLL_ENABLE))
		goto out;

	/*
	 * All registers read here have the same HIP_INDEX_REG even though
	 * they are on different building blocks
	 */
	intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
		       HIP_INDEX_VAL(tc_port, 0x2));

	hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
						  DKL_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_pll_div0 = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
	/* Only compare the AFC startup field when the VBT overrides it. */
	val = DKL_PLL_DIV0_MASK;
	if (dev_priv->vbt.override_afc_startup)
		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
	hw_state->mg_pll_div0 &= val;

	hw_state->mg_pll_div1 = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
	hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
				  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);

	hw_state->mg_pll_ssc = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
	hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
				 DKL_PLL_SSC_STEP_LEN_MASK |
				 DKL_PLL_SSC_STEP_NUM_MASK |
				 DKL_PLL_SSC_EN);

	hw_state->mg_pll_bias = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
	hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK);

	hw_state->mg_pll_tdc_coldst_bias =
		intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
					     DKL_PLL_TDC_FEED_FWD_GAIN_MASK);

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3448 
/*
 * Read out CFGCR0/CFGCR1 (and, with the VBT AFC-startup override on
 * TGL+, the DIV0 AFC field) of a combo/TBT PLL into @hw_state.  The
 * CFGCR register locations vary per platform.  Returns false if the PLL
 * is disabled or display power is unavailable.
 */
static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state,
				 i915_reg_t enable_reg)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	if (IS_ALDERLAKE_S(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR1(id));
	} else if (IS_DG1(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv, DG1_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv, DG1_DPLL_CFGCR1(id));
	} else if (IS_ROCKETLAKE(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv,
						 RKL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 RKL_DPLL_CFGCR1(id));
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		hw_state->cfgcr0 = intel_de_read(dev_priv,
						 TGL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 TGL_DPLL_CFGCR1(id));
		if (dev_priv->vbt.override_afc_startup) {
			hw_state->div0 = intel_de_read(dev_priv, TGL_DPLL0_DIV0(id));
			hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK;
		}
	} else {
		/* On JSL/EHL, DPLL4 uses the ICL CFGCR register layout at index 4. */
		if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
			hw_state->cfgcr0 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR0(4));
			hw_state->cfgcr1 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR1(4));
		} else {
			hw_state->cfgcr0 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR0(id));
			hw_state->cfgcr1 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR1(id));
		}
	}

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3507 
3508 static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
3509 				   struct intel_shared_dpll *pll,
3510 				   struct intel_dpll_hw_state *hw_state)
3511 {
3512 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
3513 
3514 	return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg);
3515 }
3516 
/* Read out the TBT PLL's state; its enable register is fixed. */
static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state)
{
	return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
}
3523 
/*
 * Program a combo/TBT PLL's configuration registers (cfgcr0/cfgcr1 and,
 * when the VBT requests an AFC startup override on TGL+, the DIV0 AFC
 * field) from the PLL's software state.
 */
static void icl_dpll_write(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	const enum intel_dpll_id id = pll->info->id;
	i915_reg_t cfgcr0_reg, cfgcr1_reg, div0_reg = INVALID_MMIO_REG;

	if (IS_ALDERLAKE_S(dev_priv)) {
		cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
		cfgcr1_reg = ADLS_DPLL_CFGCR1(id);
	} else if (IS_DG1(dev_priv)) {
		cfgcr0_reg = DG1_DPLL_CFGCR0(id);
		cfgcr1_reg = DG1_DPLL_CFGCR1(id);
	} else if (IS_ROCKETLAKE(dev_priv)) {
		cfgcr0_reg = RKL_DPLL_CFGCR0(id);
		cfgcr1_reg = RKL_DPLL_CFGCR1(id);
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		cfgcr0_reg = TGL_DPLL_CFGCR0(id);
		cfgcr1_reg = TGL_DPLL_CFGCR1(id);
		div0_reg = TGL_DPLL0_DIV0(id);
	} else {
		/* On JSL/EHL, DPLL4 uses the ICL CFGCR register layout at index 4. */
		if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
			cfgcr0_reg = ICL_DPLL_CFGCR0(4);
			cfgcr1_reg = ICL_DPLL_CFGCR1(4);
		} else {
			cfgcr0_reg = ICL_DPLL_CFGCR0(id);
			cfgcr1_reg = ICL_DPLL_CFGCR1(id);
		}
	}

	intel_de_write(dev_priv, cfgcr0_reg, hw_state->cfgcr0);
	intel_de_write(dev_priv, cfgcr1_reg, hw_state->cfgcr1);
	/* Only TGL+ has a DIV0 register; warn if the override is set elsewhere. */
	drm_WARN_ON_ONCE(&dev_priv->drm, dev_priv->vbt.override_afc_startup &&
			 !i915_mmio_reg_valid(div0_reg));
	if (dev_priv->vbt.override_afc_startup &&
	    i915_mmio_reg_valid(div0_reg))
		intel_de_rmw(dev_priv, div0_reg, TGL_DPLL0_DIV0_AFC_STARTUP_MASK,
			     hw_state->div0);
	intel_de_posting_read(dev_priv, cfgcr1_reg);
}
3564 
/* Program the MG PHY PLL (ICL) registers from the PLL's software state. */
static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
	u32 val;

	/*
	 * Some of the following registers have reserved fields, so program
	 * these with RMW based on a mask. The mask can be fixed or generated
	 * during the calc/readout phase if the mask depends on some other HW
	 * state like refclk, see icl_calc_mg_pll_state().
	 */
	val = intel_de_read(dev_priv, MG_REFCLKIN_CTL(tc_port));
	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
	val |= hw_state->mg_refclkin_ctl;
	intel_de_write(dev_priv, MG_REFCLKIN_CTL(tc_port), val);

	val = intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
	val |= hw_state->mg_clktop2_coreclkctl1;
	intel_de_write(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port), val);

	val = intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
	val |= hw_state->mg_clktop2_hsclkctl;
	intel_de_write(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port), val);

	/* These registers have no reserved fields and can be written whole. */
	intel_de_write(dev_priv, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
	intel_de_write(dev_priv, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
	intel_de_write(dev_priv, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
	intel_de_write(dev_priv, MG_PLL_FRAC_LOCK(tc_port),
		       hw_state->mg_pll_frac_lock);
	intel_de_write(dev_priv, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);

	val = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
	val &= ~hw_state->mg_pll_bias_mask;
	val |= hw_state->mg_pll_bias;
	intel_de_write(dev_priv, MG_PLL_BIAS(tc_port), val);

	val = intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
	val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
	val |= hw_state->mg_pll_tdc_coldst_bias;
	intel_de_write(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port), val);

	intel_de_posting_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
}
3615 
/* Program the DKL PHY PLL (TGL+) registers from the PLL's software state. */
static void dkl_pll_write(struct drm_i915_private *dev_priv,
			  struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
	u32 val;

	/*
	 * All registers programmed here have the same HIP_INDEX_REG even
	 * though on different building block
	 */
	intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
		       HIP_INDEX_VAL(tc_port, 0x2));

	/* All the registers are RMW */
	val = intel_de_read(dev_priv, DKL_REFCLKIN_CTL(tc_port));
	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
	val |= hw_state->mg_refclkin_ctl;
	intel_de_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), val);

	val = intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
	val |= hw_state->mg_clktop2_coreclkctl1;
	intel_de_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);

	val = intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
	val |= hw_state->mg_clktop2_hsclkctl;
	intel_de_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val);

	/* Only touch the AFC startup field when the VBT overrides it. */
	val = DKL_PLL_DIV0_MASK;
	if (dev_priv->vbt.override_afc_startup)
		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
	intel_de_rmw(dev_priv, DKL_PLL_DIV0(tc_port), val,
		     hw_state->mg_pll_div0);

	val = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
	val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
		 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
	val |= hw_state->mg_pll_div1;
	intel_de_write(dev_priv, DKL_PLL_DIV1(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
	val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
		 DKL_PLL_SSC_STEP_LEN_MASK |
		 DKL_PLL_SSC_STEP_NUM_MASK |
		 DKL_PLL_SSC_EN);
	val |= hw_state->mg_pll_ssc;
	intel_de_write(dev_priv, DKL_PLL_SSC(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
	val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
		 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
	val |= hw_state->mg_pll_bias;
	intel_de_write(dev_priv, DKL_PLL_BIAS(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
		 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
	val |= hw_state->mg_pll_tdc_coldst_bias;
	intel_de_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);

	intel_de_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
}
3683 
3684 static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
3685 				 struct intel_shared_dpll *pll,
3686 				 i915_reg_t enable_reg)
3687 {
3688 	u32 val;
3689 
3690 	val = intel_de_read(dev_priv, enable_reg);
3691 	val |= PLL_POWER_ENABLE;
3692 	intel_de_write(dev_priv, enable_reg, val);
3693 
3694 	/*
3695 	 * The spec says we need to "wait" but it also says it should be
3696 	 * immediate.
3697 	 */
3698 	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1))
3699 		drm_err(&dev_priv->drm, "PLL %d Power not enabled\n",
3700 			pll->info->id);
3701 }
3702 
3703 static void icl_pll_enable(struct drm_i915_private *dev_priv,
3704 			   struct intel_shared_dpll *pll,
3705 			   i915_reg_t enable_reg)
3706 {
3707 	u32 val;
3708 
3709 	val = intel_de_read(dev_priv, enable_reg);
3710 	val |= PLL_ENABLE;
3711 	intel_de_write(dev_priv, enable_reg, val);
3712 
3713 	/* Timeout is actually 600us. */
3714 	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
3715 		drm_err(&dev_priv->drm, "PLL %d not locked\n", pll->info->id);
3716 }
3717 
/*
 * Disable CMTG clock gating when enabling DPLL0 on ADL-P display step
 * A0 (Wa_16011069516); a no-op on all other platforms/steppings/PLLs.
 */
static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct intel_shared_dpll *pll)
{
	u32 val;

	if (!IS_ADLP_DISPLAY_STEP(i915, STEP_A0, STEP_B0) ||
	    pll->info->id != DPLL_ID_ICL_DPLL0)
		return;
	/*
	 * Wa_16011069516:adl-p[a0]
	 *
	 * All CMTG regs are unreliable until CMTG clock gating is disabled,
	 * so we can only assume the default TRANS_CMTG_CHICKEN reg value and
	 * sanity check this assumption with a double read, which presumably
	 * returns the correct value even with clock gating on.
	 *
	 * Instead of the usual place for workarounds we apply this one here,
	 * since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled.
	 */
	/* The deliberate double read is part of the workaround; do not "fix" it. */
	val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
	val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
	intel_de_write(i915, TRANS_CMTG_CHICKEN, DISABLE_DPT_CLK_GATING);
	if (drm_WARN_ON(&i915->drm, val & ~DISABLE_DPT_CLK_GATING))
		drm_dbg_kms(&i915->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val);
}
3742 
/* Power up, program and lock a combo PHY PLL. */
static void combo_pll_enable(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);

	if (IS_JSL_EHL(dev_priv) &&
	    pll->info->id == DPLL_ID_EHL_DPLL4) {

		/*
		 * We need to disable DC states when this DPLL is enabled.
		 * This can be done by taking a reference on DPLL4 power
		 * domain.
		 */
		/* The wakeref is released again in combo_pll_disable(). */
		pll->wakeref = intel_display_power_get(dev_priv,
						       POWER_DOMAIN_DC_OFF);
	}

	icl_pll_power_enable(dev_priv, pll, enable_reg);

	icl_dpll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, enable_reg);

	adlp_cmtg_clock_gating_wa(dev_priv, pll);

	/* DVFS post sequence would be here. See the comment above. */
}
3776 
/* Power up, program and lock the TBT PLL. */
static void tbt_pll_enable(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);

	icl_dpll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);

	/* DVFS post sequence would be here. See the comment above. */
}
3794 
/* Power up, program (MG or DKL flavor) and lock a Type-C PHY PLL. */
static void mg_pll_enable(struct drm_i915_private *dev_priv,
			  struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);

	icl_pll_power_enable(dev_priv, pll, enable_reg);

	/* TGL+ Type-C PHYs are DKL, older ones are MG. */
	if (DISPLAY_VER(dev_priv) >= 12)
		dkl_pll_write(dev_priv, pll);
	else
		icl_mg_pll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, enable_reg);

	/* DVFS post sequence would be here. See the comment above. */
}
3817 
3818 static void icl_pll_disable(struct drm_i915_private *dev_priv,
3819 			    struct intel_shared_dpll *pll,
3820 			    i915_reg_t enable_reg)
3821 {
3822 	u32 val;
3823 
3824 	/* The first steps are done by intel_ddi_post_disable(). */
3825 
3826 	/*
3827 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3828 	 * paths should already be setting the appropriate voltage, hence we do
3829 	 * nothing here.
3830 	 */
3831 
3832 	val = intel_de_read(dev_priv, enable_reg);
3833 	val &= ~PLL_ENABLE;
3834 	intel_de_write(dev_priv, enable_reg, val);
3835 
3836 	/* Timeout is actually 1us. */
3837 	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
3838 		drm_err(&dev_priv->drm, "PLL %d locked\n", pll->info->id);
3839 
3840 	/* DVFS post sequence would be here. See the comment above. */
3841 
3842 	val = intel_de_read(dev_priv, enable_reg);
3843 	val &= ~PLL_POWER_ENABLE;
3844 	intel_de_write(dev_priv, enable_reg, val);
3845 
3846 	/*
3847 	 * The spec says we need to "wait" but it also says it should be
3848 	 * immediate.
3849 	 */
3850 	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1))
3851 		drm_err(&dev_priv->drm, "PLL %d Power not disabled\n",
3852 			pll->info->id);
3853 }
3854 
3855 static void combo_pll_disable(struct drm_i915_private *dev_priv,
3856 			      struct intel_shared_dpll *pll)
3857 {
3858 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
3859 
3860 	icl_pll_disable(dev_priv, pll, enable_reg);
3861 
3862 	if (IS_JSL_EHL(dev_priv) &&
3863 	    pll->info->id == DPLL_ID_EHL_DPLL4)
3864 		intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF,
3865 					pll->wakeref);
3866 }
3867 
/* Disable and power down the TBT PLL. */
static void tbt_pll_disable(struct drm_i915_private *dev_priv,
			    struct intel_shared_dpll *pll)
{
	icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
}
3873 
/* Disable and power down a Type-C PHY (MG/DKL) PLL. */
static void mg_pll_disable(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	icl_pll_disable(dev_priv, pll,
			intel_tc_pll_enable_reg(dev_priv, pll));
}
3881 
/* The DPLL reference on ICL+ is the (non-SSC) CDCLK reference clock. */
static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC ref */
	i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
}
3887 
3888 static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
3889 			      const struct intel_dpll_hw_state *hw_state)
3890 {
3891 	drm_dbg_kms(&dev_priv->drm,
3892 		    "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, "
3893 		    "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
3894 		    "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
3895 		    "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
3896 		    "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
3897 		    "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
3898 		    hw_state->cfgcr0, hw_state->cfgcr1,
3899 		    hw_state->div0,
3900 		    hw_state->mg_refclkin_ctl,
3901 		    hw_state->mg_clktop2_coreclkctl1,
3902 		    hw_state->mg_clktop2_hsclkctl,
3903 		    hw_state->mg_pll_div0,
3904 		    hw_state->mg_pll_div1,
3905 		    hw_state->mg_pll_lf,
3906 		    hw_state->mg_pll_frac_lock,
3907 		    hw_state->mg_pll_ssc,
3908 		    hw_state->mg_pll_bias,
3909 		    hw_state->mg_pll_tdc_coldst_bias);
3910 }
3911 
/* PLL hooks for ICL+ combo PHY DPLLs */
static const struct intel_shared_dpll_funcs combo_pll_funcs = {
	.enable = combo_pll_enable,
	.disable = combo_pll_disable,
	.get_hw_state = combo_pll_get_hw_state,
	.get_freq = icl_ddi_combo_pll_get_freq,
};
3918 
/* PLL hooks for the Thunderbolt PLL */
static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
	.enable = tbt_pll_enable,
	.disable = tbt_pll_disable,
	.get_hw_state = tbt_pll_get_hw_state,
	.get_freq = icl_ddi_tbt_pll_get_freq,
};
3925 
/* PLL hooks for ICL MG PHY (Type-C) PLLs */
static const struct intel_shared_dpll_funcs mg_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = mg_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};
3932 
/*
 * ICL shared DPLL table. Entry order must match the DPLL id values:
 * intel_shared_dpll_init() warns if entry i does not have id == i.
 */
static const struct dpll_info icl_plls[] = {
	{ "DPLL 0",   &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1",   &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ },
};
3943 
/* DPLL manager for ICL (display version 11, excluding JSL/EHL) */
static const struct intel_dpll_mgr icl_pll_mgr = {
	.dpll_info = icl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
3952 
/* JSL/EHL shared DPLL table: combo PLLs only, no TBT/MG PLLs */
static const struct dpll_info ehl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
	{ },
};
3959 
/* DPLL manager for JSL/EHL; no update_active_dpll (no Type-C ports) */
static const struct intel_dpll_mgr ehl_pll_mgr = {
	.dpll_info = ehl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
3967 
/*
 * PLL hooks for Dekel PHY (TGL+ Type-C) PLLs; enable/disable and
 * frequency calculation are shared with the ICL MG PHY PLLs, only the
 * hw state readout differs.
 */
static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = dkl_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};
3974 
/* TGL shared DPLL table: two combo PLLs, TBT PLL, six Type-C PLLs */
static const struct dpll_info tgl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ "TC PLL 5", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL5, 0 },
	{ "TC PLL 6", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL6, 0 },
	{ },
};
3987 
/* DPLL manager for display version 12+ not matched by a more specific platform */
static const struct intel_dpll_mgr tgl_pll_mgr = {
	.dpll_info = tgl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
3996 
/* RKL shared DPLL table: combo PLLs only */
static const struct dpll_info rkl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
	{ },
};
4003 
/* DPLL manager for RKL; no update_active_dpll (no Type-C ports) */
static const struct intel_dpll_mgr rkl_pll_mgr = {
	.dpll_info = rkl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4011 
/* DG1 shared DPLL table: four combo PLLs */
static const struct dpll_info dg1_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_DG1_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_DG1_DPLL1, 0 },
	{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
	{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
	{ },
};
4019 
/* DPLL manager for DG1 */
static const struct intel_dpll_mgr dg1_pll_mgr = {
	.dpll_info = dg1_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4027 
/* ADL-S shared DPLL table: ICL ids for PLL 0/1, DG1 ids for PLL 2/3 */
static const struct dpll_info adls_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
	{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
	{ },
};
4035 
/* DPLL manager for ADL-S; no update_active_dpll (no Type-C ports) */
static const struct intel_dpll_mgr adls_pll_mgr = {
	.dpll_info = adls_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4043 
/* ADL-P shared DPLL table: two combo PLLs, TBT PLL, four Type-C PLLs */
static const struct dpll_info adlp_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ },
};
4054 
/* DPLL manager for ADL-P */
static const struct intel_dpll_mgr adlp_pll_mgr = {
	.dpll_info = adlp_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4063 
4064 /**
4065  * intel_shared_dpll_init - Initialize shared DPLLs
4066  * @dev_priv: i915 device
4067  *
4068  * Initialize shared DPLLs for @dev_priv.
4069  */
4070 void intel_shared_dpll_init(struct drm_i915_private *dev_priv)
4071 {
4072 	const struct intel_dpll_mgr *dpll_mgr = NULL;
4073 	const struct dpll_info *dpll_info;
4074 	int i;
4075 
4076 	if (IS_DG2(dev_priv))
4077 		/* No shared DPLLs on DG2; port PLLs are part of the PHY */
4078 		dpll_mgr = NULL;
4079 	else if (IS_ALDERLAKE_P(dev_priv))
4080 		dpll_mgr = &adlp_pll_mgr;
4081 	else if (IS_ALDERLAKE_S(dev_priv))
4082 		dpll_mgr = &adls_pll_mgr;
4083 	else if (IS_DG1(dev_priv))
4084 		dpll_mgr = &dg1_pll_mgr;
4085 	else if (IS_ROCKETLAKE(dev_priv))
4086 		dpll_mgr = &rkl_pll_mgr;
4087 	else if (DISPLAY_VER(dev_priv) >= 12)
4088 		dpll_mgr = &tgl_pll_mgr;
4089 	else if (IS_JSL_EHL(dev_priv))
4090 		dpll_mgr = &ehl_pll_mgr;
4091 	else if (DISPLAY_VER(dev_priv) >= 11)
4092 		dpll_mgr = &icl_pll_mgr;
4093 	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
4094 		dpll_mgr = &bxt_pll_mgr;
4095 	else if (DISPLAY_VER(dev_priv) == 9)
4096 		dpll_mgr = &skl_pll_mgr;
4097 	else if (HAS_DDI(dev_priv))
4098 		dpll_mgr = &hsw_pll_mgr;
4099 	else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
4100 		dpll_mgr = &pch_pll_mgr;
4101 
4102 	if (!dpll_mgr) {
4103 		dev_priv->dpll.num_shared_dpll = 0;
4104 		return;
4105 	}
4106 
4107 	dpll_info = dpll_mgr->dpll_info;
4108 
4109 	for (i = 0; dpll_info[i].name; i++) {
4110 		drm_WARN_ON(&dev_priv->drm, i != dpll_info[i].id);
4111 		dev_priv->dpll.shared_dplls[i].info = &dpll_info[i];
4112 	}
4113 
4114 	dev_priv->dpll.mgr = dpll_mgr;
4115 	dev_priv->dpll.num_shared_dpll = i;
4116 	mutex_init(&dev_priv->dpll.lock);
4117 
4118 	BUG_ON(dev_priv->dpll.num_shared_dpll > I915_NUM_PLLS);
4119 }
4120 
4121 /**
4122  * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
4123  * @state: atomic state
4124  * @crtc: CRTC to reserve DPLLs for
4125  * @encoder: encoder
4126  *
4127  * This function reserves all required DPLLs for the given CRTC and encoder
4128  * combination in the current atomic commit @state and the new @crtc atomic
4129  * state.
4130  *
4131  * The new configuration in the atomic commit @state is made effective by
4132  * calling intel_shared_dpll_swap_state().
4133  *
4134  * The reserved DPLLs should be released by calling
4135  * intel_release_shared_dplls().
4136  *
4137  * Returns:
4138  * 0 if all required DPLLs were successfully reserved,
4139  * negative error code otherwise.
4140  */
4141 int intel_reserve_shared_dplls(struct intel_atomic_state *state,
4142 			       struct intel_crtc *crtc,
4143 			       struct intel_encoder *encoder)
4144 {
4145 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4146 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4147 
4148 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4149 		return -EINVAL;
4150 
4151 	return dpll_mgr->get_dplls(state, crtc, encoder);
4152 }
4153 
4154 /**
4155  * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
4156  * @state: atomic state
4157  * @crtc: crtc from which the DPLLs are to be released
4158  *
4159  * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
4160  * from the current atomic commit @state and the old @crtc atomic state.
4161  *
4162  * The new configuration in the atomic commit @state is made effective by
4163  * calling intel_shared_dpll_swap_state().
4164  */
4165 void intel_release_shared_dplls(struct intel_atomic_state *state,
4166 				struct intel_crtc *crtc)
4167 {
4168 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4169 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4170 
4171 	/*
4172 	 * FIXME: this function is called for every platform having a
4173 	 * compute_clock hook, even though the platform doesn't yet support
4174 	 * the shared DPLL framework and intel_reserve_shared_dplls() is not
4175 	 * called on those.
4176 	 */
4177 	if (!dpll_mgr)
4178 		return;
4179 
4180 	dpll_mgr->put_dplls(state, crtc);
4181 }
4182 
4183 /**
4184  * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
4185  * @state: atomic state
4186  * @crtc: the CRTC for which to update the active DPLL
4187  * @encoder: encoder determining the type of port DPLL
4188  *
4189  * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
4190  * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
4191  * DPLL selected will be based on the current mode of the encoder's port.
4192  */
4193 void intel_update_active_dpll(struct intel_atomic_state *state,
4194 			      struct intel_crtc *crtc,
4195 			      struct intel_encoder *encoder)
4196 {
4197 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4198 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4199 
4200 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4201 		return;
4202 
4203 	dpll_mgr->update_active_dpll(state, crtc, encoder);
4204 }
4205 
4206 /**
4207  * intel_dpll_get_freq - calculate the DPLL's output frequency
4208  * @i915: i915 device
4209  * @pll: DPLL for which to calculate the output frequency
4210  * @pll_state: DPLL state from which to calculate the output frequency
4211  *
4212  * Return the output frequency corresponding to @pll's passed in @pll_state.
4213  */
4214 int intel_dpll_get_freq(struct drm_i915_private *i915,
4215 			const struct intel_shared_dpll *pll,
4216 			const struct intel_dpll_hw_state *pll_state)
4217 {
4218 	if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
4219 		return 0;
4220 
4221 	return pll->info->funcs->get_freq(i915, pll, pll_state);
4222 }
4223 
4224 /**
4225  * intel_dpll_get_hw_state - readout the DPLL's hardware state
4226  * @i915: i915 device
4227  * @pll: DPLL for which to calculate the output frequency
4228  * @hw_state: DPLL's hardware state
4229  *
4230  * Read out @pll's hardware state into @hw_state.
4231  */
4232 bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
4233 			     struct intel_shared_dpll *pll,
4234 			     struct intel_dpll_hw_state *hw_state)
4235 {
4236 	return pll->info->funcs->get_hw_state(i915, pll, hw_state);
4237 }
4238 
4239 static void readout_dpll_hw_state(struct drm_i915_private *i915,
4240 				  struct intel_shared_dpll *pll)
4241 {
4242 	struct intel_crtc *crtc;
4243 
4244 	pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state);
4245 
4246 	if (IS_JSL_EHL(i915) && pll->on &&
4247 	    pll->info->id == DPLL_ID_EHL_DPLL4) {
4248 		pll->wakeref = intel_display_power_get(i915,
4249 						       POWER_DOMAIN_DC_OFF);
4250 	}
4251 
4252 	pll->state.pipe_mask = 0;
4253 	for_each_intel_crtc(&i915->drm, crtc) {
4254 		struct intel_crtc_state *crtc_state =
4255 			to_intel_crtc_state(crtc->base.state);
4256 
4257 		if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
4258 			pll->state.pipe_mask |= BIT(crtc->pipe);
4259 	}
4260 	pll->active_mask = pll->state.pipe_mask;
4261 
4262 	drm_dbg_kms(&i915->drm,
4263 		    "%s hw state readout: pipe_mask 0x%x, on %i\n",
4264 		    pll->info->name, pll->state.pipe_mask, pll->on);
4265 }
4266 
4267 void intel_dpll_update_ref_clks(struct drm_i915_private *i915)
4268 {
4269 	if (i915->dpll.mgr && i915->dpll.mgr->update_ref_clks)
4270 		i915->dpll.mgr->update_ref_clks(i915);
4271 }
4272 
4273 void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
4274 {
4275 	int i;
4276 
4277 	for (i = 0; i < i915->dpll.num_shared_dpll; i++)
4278 		readout_dpll_hw_state(i915, &i915->dpll.shared_dplls[i]);
4279 }
4280 
4281 static void sanitize_dpll_state(struct drm_i915_private *i915,
4282 				struct intel_shared_dpll *pll)
4283 {
4284 	if (!pll->on)
4285 		return;
4286 
4287 	adlp_cmtg_clock_gating_wa(i915, pll);
4288 
4289 	if (pll->active_mask)
4290 		return;
4291 
4292 	drm_dbg_kms(&i915->drm,
4293 		    "%s enabled but not in use, disabling\n",
4294 		    pll->info->name);
4295 
4296 	pll->info->funcs->disable(i915, pll);
4297 	pll->on = false;
4298 }
4299 
4300 void intel_dpll_sanitize_state(struct drm_i915_private *i915)
4301 {
4302 	int i;
4303 
4304 	for (i = 0; i < i915->dpll.num_shared_dpll; i++)
4305 		sanitize_dpll_state(i915, &i915->dpll.shared_dplls[i]);
4306 }
4307 
4308 /**
4309  * intel_dpll_dump_hw_state - write hw_state to dmesg
4310  * @dev_priv: i915 drm device
4311  * @hw_state: hw state to be written to the log
4312  *
4313  * Write the relevant values in @hw_state to dmesg using drm_dbg_kms.
4314  */
4315 void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
4316 			      const struct intel_dpll_hw_state *hw_state)
4317 {
4318 	if (dev_priv->dpll.mgr) {
4319 		dev_priv->dpll.mgr->dump_hw_state(dev_priv, hw_state);
4320 	} else {
4321 		/* fallback for platforms that don't use the shared dpll
4322 		 * infrastructure
4323 		 */
4324 		drm_dbg_kms(&dev_priv->drm,
4325 			    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
4326 			    "fp0: 0x%x, fp1: 0x%x\n",
4327 			    hw_state->dpll,
4328 			    hw_state->dpll_md,
4329 			    hw_state->fp0,
4330 			    hw_state->fp1);
4331 	}
4332 }
4333