xref: /linux/drivers/gpu/drm/i915/display/intel_dpll_mgr.c (revision daa2be74b1b2302004945b2a5e32424e177cc7da)
1 /*
2  * Copyright © 2006-2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <linux/math.h>
25 #include <linux/string_helpers.h>
26 
27 #include "bxt_dpio_phy_regs.h"
28 #include "i915_reg.h"
29 #include "intel_de.h"
30 #include "intel_display_types.h"
31 #include "intel_dkl_phy.h"
32 #include "intel_dkl_phy_regs.h"
33 #include "intel_dpio_phy.h"
34 #include "intel_dpll.h"
35 #include "intel_dpll_mgr.h"
36 #include "intel_hti.h"
37 #include "intel_mg_phy_regs.h"
38 #include "intel_pch_refclk.h"
39 #include "intel_tc.h"
40 
41 /**
42  * DOC: Display PLLs
43  *
44  * Display PLLs used for driving outputs vary by platform. While some have
45  * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
46  * from a pool. In the latter scenario, it is possible that multiple pipes
47  * share a PLL if their configurations match.
48  *
49  * This file provides an abstraction over display PLLs. The function
50  * intel_shared_dpll_init() initializes the PLLs for the given platform.  The
51  * users of a PLL are tracked and that tracking is integrated with the atomic
52  * modset interface. During an atomic operation, required PLLs can be reserved
53  * for a given CRTC and encoder configuration by calling
54  * intel_reserve_shared_dplls() and previously reserved PLLs can be released
55  * with intel_release_shared_dplls().
56  * Changes to the users are first staged in the atomic state, and then made
57  * effective by calling intel_shared_dpll_swap_state() during the atomic
58  * commit phase.
59  */
60 
61 /* platform specific hooks for managing DPLLs */
62 struct intel_shared_dpll_funcs {
63 	/*
64 	 * Hook for enabling the pll, called from intel_enable_shared_dpll() if
65 	 * the pll is not already enabled.
66 	 */
67 	void (*enable)(struct drm_i915_private *i915,
68 		       struct intel_shared_dpll *pll,
69 		       const struct intel_dpll_hw_state *dpll_hw_state);
70 
71 	/*
72 	 * Hook for disabling the pll, called from intel_disable_shared_dpll()
73 	 * only when it is safe to disable the pll, i.e., there are no more
74 	 * tracked users for it.
75 	 */
76 	void (*disable)(struct drm_i915_private *i915,
77 			struct intel_shared_dpll *pll);
78 
79 	/*
80 	 * Hook for reading the values currently programmed to the DPLL
81 	 * registers. This is used for initial hw state readout and state
82 	 * verification after a mode set.
83 	 */
84 	bool (*get_hw_state)(struct drm_i915_private *i915,
85 			     struct intel_shared_dpll *pll,
86 			     struct intel_dpll_hw_state *dpll_hw_state);
87 
88 	/*
89 	 * Hook for calculating the pll's output frequency based on its passed
90 	 * in state.
91 	 */
92 	int (*get_freq)(struct drm_i915_private *i915,
93 			const struct intel_shared_dpll *pll,
94 			const struct intel_dpll_hw_state *dpll_hw_state);
95 };
96 
97 struct intel_dpll_mgr {
98 	const struct dpll_info *dpll_info;
99 
100 	int (*compute_dplls)(struct intel_atomic_state *state,
101 			     struct intel_crtc *crtc,
102 			     struct intel_encoder *encoder);
103 	int (*get_dplls)(struct intel_atomic_state *state,
104 			 struct intel_crtc *crtc,
105 			 struct intel_encoder *encoder);
106 	void (*put_dplls)(struct intel_atomic_state *state,
107 			  struct intel_crtc *crtc);
108 	void (*update_active_dpll)(struct intel_atomic_state *state,
109 				   struct intel_crtc *crtc,
110 				   struct intel_encoder *encoder);
111 	void (*update_ref_clks)(struct drm_i915_private *i915);
112 	void (*dump_hw_state)(struct drm_printer *p,
113 			      const struct intel_dpll_hw_state *dpll_hw_state);
114 	bool (*compare_hw_state)(const struct intel_dpll_hw_state *a,
115 				 const struct intel_dpll_hw_state *b);
116 };
117 
118 static void
119 intel_atomic_duplicate_dpll_state(struct drm_i915_private *i915,
120 				  struct intel_shared_dpll_state *shared_dpll)
121 {
122 	struct intel_shared_dpll *pll;
123 	int i;
124 
125 	/* Copy shared dpll state */
126 	for_each_shared_dpll(i915, pll, i)
127 		shared_dpll[pll->index] = pll->state;
128 }
129 
130 static struct intel_shared_dpll_state *
131 intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
132 {
133 	struct intel_atomic_state *state = to_intel_atomic_state(s);
134 
135 	drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
136 
137 	if (!state->dpll_set) {
138 		state->dpll_set = true;
139 
140 		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
141 						  state->shared_dpll);
142 	}
143 
144 	return state->shared_dpll;
145 }
146 
147 /**
148  * intel_get_shared_dpll_by_id - get a DPLL given its id
149  * @i915: i915 device instance
150  * @id: pll id
151  *
152  * Returns:
153  * A pointer to the DPLL with @id
154  */
155 struct intel_shared_dpll *
156 intel_get_shared_dpll_by_id(struct drm_i915_private *i915,
157 			    enum intel_dpll_id id)
158 {
159 	struct intel_shared_dpll *pll;
160 	int i;
161 
162 	for_each_shared_dpll(i915, pll, i) {
163 		if (pll->info->id == id)
164 			return pll;
165 	}
166 
167 	MISSING_CASE(id);
168 	return NULL;
169 }
170 
171 /* For ILK+ */
172 void assert_shared_dpll(struct drm_i915_private *i915,
173 			struct intel_shared_dpll *pll,
174 			bool state)
175 {
176 	bool cur_state;
177 	struct intel_dpll_hw_state hw_state;
178 
179 	if (drm_WARN(&i915->drm, !pll,
180 		     "asserting DPLL %s with no DPLL\n", str_on_off(state)))
181 		return;
182 
183 	cur_state = intel_dpll_get_hw_state(i915, pll, &hw_state);
184 	I915_STATE_WARN(i915, cur_state != state,
185 			"%s assertion failure (expected %s, current %s)\n",
186 			pll->info->name, str_on_off(state),
187 			str_on_off(cur_state));
188 }
189 
190 static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
191 {
192 	return TC_PORT_1 + id - DPLL_ID_ICL_MGPLL1;
193 }
194 
195 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
196 {
197 	return tc_port - TC_PORT_1 + DPLL_ID_ICL_MGPLL1;
198 }
199 
200 static i915_reg_t
201 intel_combo_pll_enable_reg(struct drm_i915_private *i915,
202 			   struct intel_shared_dpll *pll)
203 {
204 	if (IS_DG1(i915))
205 		return DG1_DPLL_ENABLE(pll->info->id);
206 	else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
207 		 (pll->info->id == DPLL_ID_EHL_DPLL4))
208 		return MG_PLL_ENABLE(0);
209 
210 	return ICL_DPLL_ENABLE(pll->info->id);
211 }
212 
213 static i915_reg_t
214 intel_tc_pll_enable_reg(struct drm_i915_private *i915,
215 			struct intel_shared_dpll *pll)
216 {
217 	const enum intel_dpll_id id = pll->info->id;
218 	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
219 
220 	if (IS_ALDERLAKE_P(i915))
221 		return ADLP_PORTTC_PLL_ENABLE(tc_port);
222 
223 	return MG_PLL_ENABLE(tc_port);
224 }
225 
226 static void _intel_enable_shared_dpll(struct drm_i915_private *i915,
227 				      struct intel_shared_dpll *pll)
228 {
229 	if (pll->info->power_domain)
230 		pll->wakeref = intel_display_power_get(i915, pll->info->power_domain);
231 
232 	pll->info->funcs->enable(i915, pll, &pll->state.hw_state);
233 	pll->on = true;
234 }
235 
236 static void _intel_disable_shared_dpll(struct drm_i915_private *i915,
237 				       struct intel_shared_dpll *pll)
238 {
239 	pll->info->funcs->disable(i915, pll);
240 	pll->on = false;
241 
242 	if (pll->info->power_domain)
243 		intel_display_power_put(i915, pll->info->power_domain, pll->wakeref);
244 }
245 
246 /**
247  * intel_enable_shared_dpll - enable a CRTC's shared DPLL
248  * @crtc_state: CRTC, and its state, which has a shared DPLL
249  *
250  * Enable the shared DPLL used by @crtc.
251  */
252 void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
253 {
254 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
255 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
256 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
257 	unsigned int pipe_mask = BIT(crtc->pipe);
258 	unsigned int old_mask;
259 
260 	if (drm_WARN_ON(&i915->drm, pll == NULL))
261 		return;
262 
263 	mutex_lock(&i915->display.dpll.lock);
264 	old_mask = pll->active_mask;
265 
266 	if (drm_WARN_ON(&i915->drm, !(pll->state.pipe_mask & pipe_mask)) ||
267 	    drm_WARN_ON(&i915->drm, pll->active_mask & pipe_mask))
268 		goto out;
269 
270 	pll->active_mask |= pipe_mask;
271 
272 	drm_dbg_kms(&i915->drm,
273 		    "enable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
274 		    pll->info->name, pll->active_mask, pll->on,
275 		    crtc->base.base.id, crtc->base.name);
276 
277 	if (old_mask) {
278 		drm_WARN_ON(&i915->drm, !pll->on);
279 		assert_shared_dpll_enabled(i915, pll);
280 		goto out;
281 	}
282 	drm_WARN_ON(&i915->drm, pll->on);
283 
284 	drm_dbg_kms(&i915->drm, "enabling %s\n", pll->info->name);
285 
286 	_intel_enable_shared_dpll(i915, pll);
287 
288 out:
289 	mutex_unlock(&i915->display.dpll.lock);
290 }
291 
292 /**
293  * intel_disable_shared_dpll - disable a CRTC's shared DPLL
294  * @crtc_state: CRTC, and its state, which has a shared DPLL
295  *
296  * Disable the shared DPLL used by @crtc.
297  */
298 void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
299 {
300 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
301 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
302 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
303 	unsigned int pipe_mask = BIT(crtc->pipe);
304 
305 	/* PCH only available on ILK+ */
306 	if (DISPLAY_VER(i915) < 5)
307 		return;
308 
309 	if (pll == NULL)
310 		return;
311 
312 	mutex_lock(&i915->display.dpll.lock);
313 	if (drm_WARN(&i915->drm, !(pll->active_mask & pipe_mask),
314 		     "%s not used by [CRTC:%d:%s]\n", pll->info->name,
315 		     crtc->base.base.id, crtc->base.name))
316 		goto out;
317 
318 	drm_dbg_kms(&i915->drm,
319 		    "disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
320 		    pll->info->name, pll->active_mask, pll->on,
321 		    crtc->base.base.id, crtc->base.name);
322 
323 	assert_shared_dpll_enabled(i915, pll);
324 	drm_WARN_ON(&i915->drm, !pll->on);
325 
326 	pll->active_mask &= ~pipe_mask;
327 	if (pll->active_mask)
328 		goto out;
329 
330 	drm_dbg_kms(&i915->drm, "disabling %s\n", pll->info->name);
331 
332 	_intel_disable_shared_dpll(i915, pll);
333 
334 out:
335 	mutex_unlock(&i915->display.dpll.lock);
336 }
337 
338 static unsigned long
339 intel_dpll_mask_all(struct drm_i915_private *i915)
340 {
341 	struct intel_shared_dpll *pll;
342 	unsigned long dpll_mask = 0;
343 	int i;
344 
345 	for_each_shared_dpll(i915, pll, i) {
346 		drm_WARN_ON(&i915->drm, dpll_mask & BIT(pll->info->id));
347 
348 		dpll_mask |= BIT(pll->info->id);
349 	}
350 
351 	return dpll_mask;
352 }
353 
354 static struct intel_shared_dpll *
355 intel_find_shared_dpll(struct intel_atomic_state *state,
356 		       const struct intel_crtc *crtc,
357 		       const struct intel_dpll_hw_state *dpll_hw_state,
358 		       unsigned long dpll_mask)
359 {
360 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
361 	unsigned long dpll_mask_all = intel_dpll_mask_all(i915);
362 	struct intel_shared_dpll_state *shared_dpll;
363 	struct intel_shared_dpll *unused_pll = NULL;
364 	enum intel_dpll_id id;
365 
366 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
367 
368 	drm_WARN_ON(&i915->drm, dpll_mask & ~dpll_mask_all);
369 
370 	for_each_set_bit(id, &dpll_mask, fls(dpll_mask_all)) {
371 		struct intel_shared_dpll *pll;
372 
373 		pll = intel_get_shared_dpll_by_id(i915, id);
374 		if (!pll)
375 			continue;
376 
377 		/* Only want to check enabled timings first */
378 		if (shared_dpll[pll->index].pipe_mask == 0) {
379 			if (!unused_pll)
380 				unused_pll = pll;
381 			continue;
382 		}
383 
384 		if (memcmp(dpll_hw_state,
385 			   &shared_dpll[pll->index].hw_state,
386 			   sizeof(*dpll_hw_state)) == 0) {
387 			drm_dbg_kms(&i915->drm,
388 				    "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
389 				    crtc->base.base.id, crtc->base.name,
390 				    pll->info->name,
391 				    shared_dpll[pll->index].pipe_mask,
392 				    pll->active_mask);
393 			return pll;
394 		}
395 	}
396 
397 	/* Ok no matching timings, maybe there's a free one? */
398 	if (unused_pll) {
399 		drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] allocated %s\n",
400 			    crtc->base.base.id, crtc->base.name,
401 			    unused_pll->info->name);
402 		return unused_pll;
403 	}
404 
405 	return NULL;
406 }
407 
408 /**
409  * intel_reference_shared_dpll_crtc - Get a DPLL reference for a CRTC
410  * @crtc: CRTC on which behalf the reference is taken
411  * @pll: DPLL for which the reference is taken
412  * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
413  *
414  * Take a reference for @pll tracking the use of it by @crtc.
415  */
416 static void
417 intel_reference_shared_dpll_crtc(const struct intel_crtc *crtc,
418 				 const struct intel_shared_dpll *pll,
419 				 struct intel_shared_dpll_state *shared_dpll_state)
420 {
421 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
422 
423 	drm_WARN_ON(&i915->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) != 0);
424 
425 	shared_dpll_state->pipe_mask |= BIT(crtc->pipe);
426 
427 	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] reserving %s\n",
428 		    crtc->base.base.id, crtc->base.name, pll->info->name);
429 }
430 
431 static void
432 intel_reference_shared_dpll(struct intel_atomic_state *state,
433 			    const struct intel_crtc *crtc,
434 			    const struct intel_shared_dpll *pll,
435 			    const struct intel_dpll_hw_state *dpll_hw_state)
436 {
437 	struct intel_shared_dpll_state *shared_dpll;
438 
439 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
440 
441 	if (shared_dpll[pll->index].pipe_mask == 0)
442 		shared_dpll[pll->index].hw_state = *dpll_hw_state;
443 
444 	intel_reference_shared_dpll_crtc(crtc, pll, &shared_dpll[pll->index]);
445 }
446 
447 /**
448  * intel_unreference_shared_dpll_crtc - Drop a DPLL reference for a CRTC
449  * @crtc: CRTC on which behalf the reference is dropped
450  * @pll: DPLL for which the reference is dropped
451  * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
452  *
453  * Drop a reference for @pll tracking the end of use of it by @crtc.
454  */
455 void
456 intel_unreference_shared_dpll_crtc(const struct intel_crtc *crtc,
457 				   const struct intel_shared_dpll *pll,
458 				   struct intel_shared_dpll_state *shared_dpll_state)
459 {
460 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
461 
462 	drm_WARN_ON(&i915->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) == 0);
463 
464 	shared_dpll_state->pipe_mask &= ~BIT(crtc->pipe);
465 
466 	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] releasing %s\n",
467 		    crtc->base.base.id, crtc->base.name, pll->info->name);
468 }
469 
470 static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
471 					  const struct intel_crtc *crtc,
472 					  const struct intel_shared_dpll *pll)
473 {
474 	struct intel_shared_dpll_state *shared_dpll;
475 
476 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
477 
478 	intel_unreference_shared_dpll_crtc(crtc, pll, &shared_dpll[pll->index]);
479 }
480 
481 static void intel_put_dpll(struct intel_atomic_state *state,
482 			   struct intel_crtc *crtc)
483 {
484 	const struct intel_crtc_state *old_crtc_state =
485 		intel_atomic_get_old_crtc_state(state, crtc);
486 	struct intel_crtc_state *new_crtc_state =
487 		intel_atomic_get_new_crtc_state(state, crtc);
488 
489 	new_crtc_state->shared_dpll = NULL;
490 
491 	if (!old_crtc_state->shared_dpll)
492 		return;
493 
494 	intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
495 }
496 
497 /**
498  * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
499  * @state: atomic state
500  *
501  * This is the dpll version of drm_atomic_helper_swap_state() since the
502  * helper does not handle driver-specific global state.
503  *
504  * For consistency with atomic helpers this function does a complete swap,
505  * i.e. it also puts the current state into @state, even though there is no
506  * need for that at this moment.
507  */
508 void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
509 {
510 	struct drm_i915_private *i915 = to_i915(state->base.dev);
511 	struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
512 	struct intel_shared_dpll *pll;
513 	int i;
514 
515 	if (!state->dpll_set)
516 		return;
517 
518 	for_each_shared_dpll(i915, pll, i)
519 		swap(pll->state, shared_dpll[pll->index]);
520 }
521 
522 static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *i915,
523 				      struct intel_shared_dpll *pll,
524 				      struct intel_dpll_hw_state *dpll_hw_state)
525 {
526 	struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
527 	const enum intel_dpll_id id = pll->info->id;
528 	intel_wakeref_t wakeref;
529 	u32 val;
530 
531 	wakeref = intel_display_power_get_if_enabled(i915,
532 						     POWER_DOMAIN_DISPLAY_CORE);
533 	if (!wakeref)
534 		return false;
535 
536 	val = intel_de_read(i915, PCH_DPLL(id));
537 	hw_state->dpll = val;
538 	hw_state->fp0 = intel_de_read(i915, PCH_FP0(id));
539 	hw_state->fp1 = intel_de_read(i915, PCH_FP1(id));
540 
541 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
542 
543 	return val & DPLL_VCO_ENABLE;
544 }
545 
546 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *i915)
547 {
548 	u32 val;
549 	bool enabled;
550 
551 	val = intel_de_read(i915, PCH_DREF_CONTROL);
552 	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
553 			    DREF_SUPERSPREAD_SOURCE_MASK));
554 	I915_STATE_WARN(i915, !enabled,
555 			"PCH refclk assertion failure, should be active but is disabled\n");
556 }
557 
558 static void ibx_pch_dpll_enable(struct drm_i915_private *i915,
559 				struct intel_shared_dpll *pll,
560 				const struct intel_dpll_hw_state *dpll_hw_state)
561 {
562 	const struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
563 	const enum intel_dpll_id id = pll->info->id;
564 
565 	/* PCH refclock must be enabled first */
566 	ibx_assert_pch_refclk_enabled(i915);
567 
568 	intel_de_write(i915, PCH_FP0(id), hw_state->fp0);
569 	intel_de_write(i915, PCH_FP1(id), hw_state->fp1);
570 
571 	intel_de_write(i915, PCH_DPLL(id), hw_state->dpll);
572 
573 	/* Wait for the clocks to stabilize. */
574 	intel_de_posting_read(i915, PCH_DPLL(id));
575 	udelay(150);
576 
577 	/* The pixel multiplier can only be updated once the
578 	 * DPLL is enabled and the clocks are stable.
579 	 *
580 	 * So write it again.
581 	 */
582 	intel_de_write(i915, PCH_DPLL(id), hw_state->dpll);
583 	intel_de_posting_read(i915, PCH_DPLL(id));
584 	udelay(200);
585 }
586 
587 static void ibx_pch_dpll_disable(struct drm_i915_private *i915,
588 				 struct intel_shared_dpll *pll)
589 {
590 	const enum intel_dpll_id id = pll->info->id;
591 
592 	intel_de_write(i915, PCH_DPLL(id), 0);
593 	intel_de_posting_read(i915, PCH_DPLL(id));
594 	udelay(200);
595 }
596 
597 static int ibx_compute_dpll(struct intel_atomic_state *state,
598 			    struct intel_crtc *crtc,
599 			    struct intel_encoder *encoder)
600 {
601 	return 0;
602 }
603 
604 static int ibx_get_dpll(struct intel_atomic_state *state,
605 			struct intel_crtc *crtc,
606 			struct intel_encoder *encoder)
607 {
608 	struct intel_crtc_state *crtc_state =
609 		intel_atomic_get_new_crtc_state(state, crtc);
610 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
611 	struct intel_shared_dpll *pll;
612 	enum intel_dpll_id id;
613 
614 	if (HAS_PCH_IBX(i915)) {
615 		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
616 		id = (enum intel_dpll_id) crtc->pipe;
617 		pll = intel_get_shared_dpll_by_id(i915, id);
618 
619 		drm_dbg_kms(&i915->drm,
620 			    "[CRTC:%d:%s] using pre-allocated %s\n",
621 			    crtc->base.base.id, crtc->base.name,
622 			    pll->info->name);
623 	} else {
624 		pll = intel_find_shared_dpll(state, crtc,
625 					     &crtc_state->dpll_hw_state,
626 					     BIT(DPLL_ID_PCH_PLL_B) |
627 					     BIT(DPLL_ID_PCH_PLL_A));
628 	}
629 
630 	if (!pll)
631 		return -EINVAL;
632 
633 	/* reference the pll */
634 	intel_reference_shared_dpll(state, crtc,
635 				    pll, &crtc_state->dpll_hw_state);
636 
637 	crtc_state->shared_dpll = pll;
638 
639 	return 0;
640 }
641 
642 static void ibx_dump_hw_state(struct drm_printer *p,
643 			      const struct intel_dpll_hw_state *dpll_hw_state)
644 {
645 	const struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
646 
647 	drm_printf(p, "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
648 		   "fp0: 0x%x, fp1: 0x%x\n",
649 		   hw_state->dpll,
650 		   hw_state->dpll_md,
651 		   hw_state->fp0,
652 		   hw_state->fp1);
653 }
654 
655 static bool ibx_compare_hw_state(const struct intel_dpll_hw_state *_a,
656 				 const struct intel_dpll_hw_state *_b)
657 {
658 	const struct i9xx_dpll_hw_state *a = &_a->i9xx;
659 	const struct i9xx_dpll_hw_state *b = &_b->i9xx;
660 
661 	return a->dpll == b->dpll &&
662 		a->dpll_md == b->dpll_md &&
663 		a->fp0 == b->fp0 &&
664 		a->fp1 == b->fp1;
665 }
666 
667 static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
668 	.enable = ibx_pch_dpll_enable,
669 	.disable = ibx_pch_dpll_disable,
670 	.get_hw_state = ibx_pch_dpll_get_hw_state,
671 };
672 
673 static const struct dpll_info pch_plls[] = {
674 	{ .name = "PCH DPLL A", .funcs = &ibx_pch_dpll_funcs, .id = DPLL_ID_PCH_PLL_A, },
675 	{ .name = "PCH DPLL B", .funcs = &ibx_pch_dpll_funcs, .id = DPLL_ID_PCH_PLL_B, },
676 	{}
677 };
678 
679 static const struct intel_dpll_mgr pch_pll_mgr = {
680 	.dpll_info = pch_plls,
681 	.compute_dplls = ibx_compute_dpll,
682 	.get_dplls = ibx_get_dpll,
683 	.put_dplls = intel_put_dpll,
684 	.dump_hw_state = ibx_dump_hw_state,
685 	.compare_hw_state = ibx_compare_hw_state,
686 };
687 
688 static void hsw_ddi_wrpll_enable(struct drm_i915_private *i915,
689 				 struct intel_shared_dpll *pll,
690 				 const struct intel_dpll_hw_state *dpll_hw_state)
691 {
692 	const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
693 	const enum intel_dpll_id id = pll->info->id;
694 
695 	intel_de_write(i915, WRPLL_CTL(id), hw_state->wrpll);
696 	intel_de_posting_read(i915, WRPLL_CTL(id));
697 	udelay(20);
698 }
699 
700 static void hsw_ddi_spll_enable(struct drm_i915_private *i915,
701 				struct intel_shared_dpll *pll,
702 				const struct intel_dpll_hw_state *dpll_hw_state)
703 {
704 	const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
705 
706 	intel_de_write(i915, SPLL_CTL, hw_state->spll);
707 	intel_de_posting_read(i915, SPLL_CTL);
708 	udelay(20);
709 }
710 
711 static void hsw_ddi_wrpll_disable(struct drm_i915_private *i915,
712 				  struct intel_shared_dpll *pll)
713 {
714 	const enum intel_dpll_id id = pll->info->id;
715 
716 	intel_de_rmw(i915, WRPLL_CTL(id), WRPLL_PLL_ENABLE, 0);
717 	intel_de_posting_read(i915, WRPLL_CTL(id));
718 
719 	/*
720 	 * Try to set up the PCH reference clock once all DPLLs
721 	 * that depend on it have been shut down.
722 	 */
723 	if (i915->display.dpll.pch_ssc_use & BIT(id))
724 		intel_init_pch_refclk(i915);
725 }
726 
727 static void hsw_ddi_spll_disable(struct drm_i915_private *i915,
728 				 struct intel_shared_dpll *pll)
729 {
730 	enum intel_dpll_id id = pll->info->id;
731 
732 	intel_de_rmw(i915, SPLL_CTL, SPLL_PLL_ENABLE, 0);
733 	intel_de_posting_read(i915, SPLL_CTL);
734 
735 	/*
736 	 * Try to set up the PCH reference clock once all DPLLs
737 	 * that depend on it have been shut down.
738 	 */
739 	if (i915->display.dpll.pch_ssc_use & BIT(id))
740 		intel_init_pch_refclk(i915);
741 }
742 
743 static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *i915,
744 				       struct intel_shared_dpll *pll,
745 				       struct intel_dpll_hw_state *dpll_hw_state)
746 {
747 	struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
748 	const enum intel_dpll_id id = pll->info->id;
749 	intel_wakeref_t wakeref;
750 	u32 val;
751 
752 	wakeref = intel_display_power_get_if_enabled(i915,
753 						     POWER_DOMAIN_DISPLAY_CORE);
754 	if (!wakeref)
755 		return false;
756 
757 	val = intel_de_read(i915, WRPLL_CTL(id));
758 	hw_state->wrpll = val;
759 
760 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
761 
762 	return val & WRPLL_PLL_ENABLE;
763 }
764 
765 static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *i915,
766 				      struct intel_shared_dpll *pll,
767 				      struct intel_dpll_hw_state *dpll_hw_state)
768 {
769 	struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
770 	intel_wakeref_t wakeref;
771 	u32 val;
772 
773 	wakeref = intel_display_power_get_if_enabled(i915,
774 						     POWER_DOMAIN_DISPLAY_CORE);
775 	if (!wakeref)
776 		return false;
777 
778 	val = intel_de_read(i915, SPLL_CTL);
779 	hw_state->spll = val;
780 
781 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
782 
783 	return val & SPLL_PLL_ENABLE;
784 }
785 
786 #define LC_FREQ 2700
787 #define LC_FREQ_2K U64_C(LC_FREQ * 2000)
788 
789 #define P_MIN 2
790 #define P_MAX 64
791 #define P_INC 2
792 
793 /* Constraints for PLL good behavior */
794 #define REF_MIN 48
795 #define REF_MAX 400
796 #define VCO_MIN 2400
797 #define VCO_MAX 4800
798 
799 struct hsw_wrpll_rnp {
800 	unsigned p, n2, r2;
801 };
802 
803 static unsigned hsw_wrpll_get_budget_for_freq(int clock)
804 {
805 	switch (clock) {
806 	case 25175000:
807 	case 25200000:
808 	case 27000000:
809 	case 27027000:
810 	case 37762500:
811 	case 37800000:
812 	case 40500000:
813 	case 40541000:
814 	case 54000000:
815 	case 54054000:
816 	case 59341000:
817 	case 59400000:
818 	case 72000000:
819 	case 74176000:
820 	case 74250000:
821 	case 81000000:
822 	case 81081000:
823 	case 89012000:
824 	case 89100000:
825 	case 108000000:
826 	case 108108000:
827 	case 111264000:
828 	case 111375000:
829 	case 148352000:
830 	case 148500000:
831 	case 162000000:
832 	case 162162000:
833 	case 222525000:
834 	case 222750000:
835 	case 296703000:
836 	case 297000000:
837 		return 0;
838 	case 233500000:
839 	case 245250000:
840 	case 247750000:
841 	case 253250000:
842 	case 298000000:
843 		return 1500;
844 	case 169128000:
845 	case 169500000:
846 	case 179500000:
847 	case 202000000:
848 		return 2000;
849 	case 256250000:
850 	case 262500000:
851 	case 270000000:
852 	case 272500000:
853 	case 273750000:
854 	case 280750000:
855 	case 281250000:
856 	case 286000000:
857 	case 291750000:
858 		return 4000;
859 	case 267250000:
860 	case 268500000:
861 		return 5000;
862 	default:
863 		return 1000;
864 	}
865 }
866 
867 static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
868 				 unsigned int r2, unsigned int n2,
869 				 unsigned int p,
870 				 struct hsw_wrpll_rnp *best)
871 {
872 	u64 a, b, c, d, diff, diff_best;
873 
874 	/* No best (r,n,p) yet */
875 	if (best->p == 0) {
876 		best->p = p;
877 		best->n2 = n2;
878 		best->r2 = r2;
879 		return;
880 	}
881 
882 	/*
883 	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
884 	 * freq2k.
885 	 *
886 	 * delta = 1e6 *
887 	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
888 	 *	   freq2k;
889 	 *
890 	 * and we would like delta <= budget.
891 	 *
892 	 * If the discrepancy is above the PPM-based budget, always prefer to
893 	 * improve upon the previous solution.  However, if you're within the
894 	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
895 	 */
896 	a = freq2k * budget * p * r2;
897 	b = freq2k * budget * best->p * best->r2;
898 	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
899 	diff_best = abs_diff(freq2k * best->p * best->r2,
900 			     LC_FREQ_2K * best->n2);
901 	c = 1000000 * diff;
902 	d = 1000000 * diff_best;
903 
904 	if (a < c && b < d) {
905 		/* If both are above the budget, pick the closer */
906 		if (best->p * best->r2 * diff < p * r2 * diff_best) {
907 			best->p = p;
908 			best->n2 = n2;
909 			best->r2 = r2;
910 		}
911 	} else if (a >= c && b < d) {
912 		/* If A is below the threshold but B is above it?  Update. */
913 		best->p = p;
914 		best->n2 = n2;
915 		best->r2 = r2;
916 	} else if (a >= c && b >= d) {
917 		/* Both are below the limit, so pick the higher n2/(r2*r2) */
918 		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
919 			best->p = p;
920 			best->n2 = n2;
921 			best->r2 = r2;
922 		}
923 	}
924 	/* Otherwise a < c && b >= d, do nothing */
925 }
926 
927 static void
928 hsw_ddi_calculate_wrpll(int clock /* in Hz */,
929 			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
930 {
931 	u64 freq2k;
932 	unsigned p, n2, r2;
933 	struct hsw_wrpll_rnp best = {};
934 	unsigned budget;
935 
936 	freq2k = clock / 100;
937 
938 	budget = hsw_wrpll_get_budget_for_freq(clock);
939 
940 	/* Special case handling for 540 pixel clock: bypass WR PLL entirely
941 	 * and directly pass the LC PLL to it. */
942 	if (freq2k == 5400000) {
943 		*n2_out = 2;
944 		*p_out = 1;
945 		*r2_out = 2;
946 		return;
947 	}
948 
949 	/*
950 	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
951 	 * the WR PLL.
952 	 *
953 	 * We want R so that REF_MIN <= Ref <= REF_MAX.
954 	 * Injecting R2 = 2 * R gives:
955 	 *   REF_MAX * r2 > LC_FREQ * 2 and
956 	 *   REF_MIN * r2 < LC_FREQ * 2
957 	 *
958 	 * Which means the desired boundaries for r2 are:
959 	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
960 	 *
961 	 */
962 	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
963 	     r2 <= LC_FREQ * 2 / REF_MIN;
964 	     r2++) {
965 
966 		/*
967 		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
968 		 *
969 		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
970 		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
971 		 *   VCO_MAX * r2 > n2 * LC_FREQ and
972 		 *   VCO_MIN * r2 < n2 * LC_FREQ)
973 		 *
974 		 * Which means the desired boundaries for n2 are:
975 		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
976 		 */
977 		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
978 		     n2 <= VCO_MAX * r2 / LC_FREQ;
979 		     n2++) {
980 
981 			for (p = P_MIN; p <= P_MAX; p += P_INC)
982 				hsw_wrpll_update_rnp(freq2k, budget,
983 						     r2, n2, p, &best);
984 		}
985 	}
986 
987 	*n2_out = best.n2;
988 	*p_out = best.p;
989 	*r2_out = best.r2;
990 }
991 
992 static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *i915,
993 				  const struct intel_shared_dpll *pll,
994 				  const struct intel_dpll_hw_state *dpll_hw_state)
995 {
996 	const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
997 	int refclk;
998 	int n, p, r;
999 	u32 wrpll = hw_state->wrpll;
1000 
1001 	switch (wrpll & WRPLL_REF_MASK) {
1002 	case WRPLL_REF_SPECIAL_HSW:
1003 		/* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
1004 		if (IS_HASWELL(i915) && !IS_HASWELL_ULT(i915)) {
1005 			refclk = i915->display.dpll.ref_clks.nssc;
1006 			break;
1007 		}
1008 		fallthrough;
1009 	case WRPLL_REF_PCH_SSC:
1010 		/*
1011 		 * We could calculate spread here, but our checking
1012 		 * code only cares about 5% accuracy, and spread is a max of
1013 		 * 0.5% downspread.
1014 		 */
1015 		refclk = i915->display.dpll.ref_clks.ssc;
1016 		break;
1017 	case WRPLL_REF_LCPLL:
1018 		refclk = 2700000;
1019 		break;
1020 	default:
1021 		MISSING_CASE(wrpll);
1022 		return 0;
1023 	}
1024 
1025 	r = wrpll & WRPLL_DIVIDER_REF_MASK;
1026 	p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
1027 	n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;
1028 
1029 	/* Convert to KHz, p & r have a fixed point portion */
1030 	return (refclk * n / 10) / (p * r) * 2;
1031 }
1032 
1033 static int
1034 hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state,
1035 			   struct intel_crtc *crtc)
1036 {
1037 	struct drm_i915_private *i915 = to_i915(state->base.dev);
1038 	struct intel_crtc_state *crtc_state =
1039 		intel_atomic_get_new_crtc_state(state, crtc);
1040 	struct hsw_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.hsw;
1041 	unsigned int p, n2, r2;
1042 
1043 	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
1044 
1045 	hw_state->wrpll =
1046 		WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
1047 		WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
1048 		WRPLL_DIVIDER_POST(p);
1049 
1050 	crtc_state->port_clock = hsw_ddi_wrpll_get_freq(i915, NULL,
1051 							&crtc_state->dpll_hw_state);
1052 
1053 	return 0;
1054 }
1055 
1056 static struct intel_shared_dpll *
1057 hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
1058 		       struct intel_crtc *crtc)
1059 {
1060 	struct intel_crtc_state *crtc_state =
1061 		intel_atomic_get_new_crtc_state(state, crtc);
1062 
1063 	return intel_find_shared_dpll(state, crtc,
1064 				      &crtc_state->dpll_hw_state,
1065 				      BIT(DPLL_ID_WRPLL2) |
1066 				      BIT(DPLL_ID_WRPLL1));
1067 }
1068 
1069 static int
1070 hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state)
1071 {
1072 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1073 	int clock = crtc_state->port_clock;
1074 
1075 	switch (clock / 2) {
1076 	case 81000:
1077 	case 135000:
1078 	case 270000:
1079 		return 0;
1080 	default:
1081 		drm_dbg_kms(&i915->drm, "Invalid clock for DP: %d\n",
1082 			    clock);
1083 		return -EINVAL;
1084 	}
1085 }
1086 
1087 static struct intel_shared_dpll *
1088 hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
1089 {
1090 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1091 	struct intel_shared_dpll *pll;
1092 	enum intel_dpll_id pll_id;
1093 	int clock = crtc_state->port_clock;
1094 
1095 	switch (clock / 2) {
1096 	case 81000:
1097 		pll_id = DPLL_ID_LCPLL_810;
1098 		break;
1099 	case 135000:
1100 		pll_id = DPLL_ID_LCPLL_1350;
1101 		break;
1102 	case 270000:
1103 		pll_id = DPLL_ID_LCPLL_2700;
1104 		break;
1105 	default:
1106 		MISSING_CASE(clock / 2);
1107 		return NULL;
1108 	}
1109 
1110 	pll = intel_get_shared_dpll_by_id(i915, pll_id);
1111 
1112 	if (!pll)
1113 		return NULL;
1114 
1115 	return pll;
1116 }
1117 
1118 static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1119 				  const struct intel_shared_dpll *pll,
1120 				  const struct intel_dpll_hw_state *dpll_hw_state)
1121 {
1122 	int link_clock = 0;
1123 
1124 	switch (pll->info->id) {
1125 	case DPLL_ID_LCPLL_810:
1126 		link_clock = 81000;
1127 		break;
1128 	case DPLL_ID_LCPLL_1350:
1129 		link_clock = 135000;
1130 		break;
1131 	case DPLL_ID_LCPLL_2700:
1132 		link_clock = 270000;
1133 		break;
1134 	default:
1135 		drm_WARN(&i915->drm, 1, "bad port clock sel\n");
1136 		break;
1137 	}
1138 
1139 	return link_clock * 2;
1140 }
1141 
1142 static int
1143 hsw_ddi_spll_compute_dpll(struct intel_atomic_state *state,
1144 			  struct intel_crtc *crtc)
1145 {
1146 	struct intel_crtc_state *crtc_state =
1147 		intel_atomic_get_new_crtc_state(state, crtc);
1148 	struct hsw_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.hsw;
1149 
1150 	if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
1151 		return -EINVAL;
1152 
1153 	hw_state->spll =
1154 		SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
1155 
1156 	return 0;
1157 }
1158 
1159 static struct intel_shared_dpll *
1160 hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
1161 		      struct intel_crtc *crtc)
1162 {
1163 	struct intel_crtc_state *crtc_state =
1164 		intel_atomic_get_new_crtc_state(state, crtc);
1165 
1166 	return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
1167 				      BIT(DPLL_ID_SPLL));
1168 }
1169 
1170 static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
1171 				 const struct intel_shared_dpll *pll,
1172 				 const struct intel_dpll_hw_state *dpll_hw_state)
1173 {
1174 	const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
1175 	int link_clock = 0;
1176 
1177 	switch (hw_state->spll & SPLL_FREQ_MASK) {
1178 	case SPLL_FREQ_810MHz:
1179 		link_clock = 81000;
1180 		break;
1181 	case SPLL_FREQ_1350MHz:
1182 		link_clock = 135000;
1183 		break;
1184 	case SPLL_FREQ_2700MHz:
1185 		link_clock = 270000;
1186 		break;
1187 	default:
1188 		drm_WARN(&i915->drm, 1, "bad spll freq\n");
1189 		break;
1190 	}
1191 
1192 	return link_clock * 2;
1193 }
1194 
1195 static int hsw_compute_dpll(struct intel_atomic_state *state,
1196 			    struct intel_crtc *crtc,
1197 			    struct intel_encoder *encoder)
1198 {
1199 	struct intel_crtc_state *crtc_state =
1200 		intel_atomic_get_new_crtc_state(state, crtc);
1201 
1202 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1203 		return hsw_ddi_wrpll_compute_dpll(state, crtc);
1204 	else if (intel_crtc_has_dp_encoder(crtc_state))
1205 		return hsw_ddi_lcpll_compute_dpll(crtc_state);
1206 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1207 		return hsw_ddi_spll_compute_dpll(state, crtc);
1208 	else
1209 		return -EINVAL;
1210 }
1211 
1212 static int hsw_get_dpll(struct intel_atomic_state *state,
1213 			struct intel_crtc *crtc,
1214 			struct intel_encoder *encoder)
1215 {
1216 	struct intel_crtc_state *crtc_state =
1217 		intel_atomic_get_new_crtc_state(state, crtc);
1218 	struct intel_shared_dpll *pll = NULL;
1219 
1220 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1221 		pll = hsw_ddi_wrpll_get_dpll(state, crtc);
1222 	else if (intel_crtc_has_dp_encoder(crtc_state))
1223 		pll = hsw_ddi_lcpll_get_dpll(crtc_state);
1224 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1225 		pll = hsw_ddi_spll_get_dpll(state, crtc);
1226 
1227 	if (!pll)
1228 		return -EINVAL;
1229 
1230 	intel_reference_shared_dpll(state, crtc,
1231 				    pll, &crtc_state->dpll_hw_state);
1232 
1233 	crtc_state->shared_dpll = pll;
1234 
1235 	return 0;
1236 }
1237 
1238 static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
1239 {
1240 	i915->display.dpll.ref_clks.ssc = 135000;
1241 	/* Non-SSC is only used on non-ULT HSW. */
1242 	if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
1243 		i915->display.dpll.ref_clks.nssc = 24000;
1244 	else
1245 		i915->display.dpll.ref_clks.nssc = 135000;
1246 }
1247 
1248 static void hsw_dump_hw_state(struct drm_printer *p,
1249 			      const struct intel_dpll_hw_state *dpll_hw_state)
1250 {
1251 	const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
1252 
1253 	drm_printf(p, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
1254 		   hw_state->wrpll, hw_state->spll);
1255 }
1256 
1257 static bool hsw_compare_hw_state(const struct intel_dpll_hw_state *_a,
1258 				 const struct intel_dpll_hw_state *_b)
1259 {
1260 	const struct hsw_dpll_hw_state *a = &_a->hsw;
1261 	const struct hsw_dpll_hw_state *b = &_b->hsw;
1262 
1263 	return a->wrpll == b->wrpll &&
1264 		a->spll == b->spll;
1265 }
1266 
1267 static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
1268 	.enable = hsw_ddi_wrpll_enable,
1269 	.disable = hsw_ddi_wrpll_disable,
1270 	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
1271 	.get_freq = hsw_ddi_wrpll_get_freq,
1272 };
1273 
1274 static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
1275 	.enable = hsw_ddi_spll_enable,
1276 	.disable = hsw_ddi_spll_disable,
1277 	.get_hw_state = hsw_ddi_spll_get_hw_state,
1278 	.get_freq = hsw_ddi_spll_get_freq,
1279 };
1280 
1281 static void hsw_ddi_lcpll_enable(struct drm_i915_private *i915,
1282 				 struct intel_shared_dpll *pll,
1283 				 const struct intel_dpll_hw_state *hw_state)
1284 {
1285 }
1286 
1287 static void hsw_ddi_lcpll_disable(struct drm_i915_private *i915,
1288 				  struct intel_shared_dpll *pll)
1289 {
1290 }
1291 
1292 static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *i915,
1293 				       struct intel_shared_dpll *pll,
1294 				       struct intel_dpll_hw_state *dpll_hw_state)
1295 {
1296 	return true;
1297 }
1298 
1299 static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
1300 	.enable = hsw_ddi_lcpll_enable,
1301 	.disable = hsw_ddi_lcpll_disable,
1302 	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
1303 	.get_freq = hsw_ddi_lcpll_get_freq,
1304 };
1305 
1306 static const struct dpll_info hsw_plls[] = {
1307 	{ .name = "WRPLL 1", .funcs = &hsw_ddi_wrpll_funcs, .id = DPLL_ID_WRPLL1, },
1308 	{ .name = "WRPLL 2", .funcs = &hsw_ddi_wrpll_funcs, .id = DPLL_ID_WRPLL2, },
1309 	{ .name = "SPLL", .funcs = &hsw_ddi_spll_funcs, .id = DPLL_ID_SPLL, },
1310 	{ .name = "LCPLL 810", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_810,
1311 	  .always_on = true, },
1312 	{ .name = "LCPLL 1350", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_1350,
1313 	  .always_on = true, },
1314 	{ .name = "LCPLL 2700", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_2700,
1315 	  .always_on = true, },
1316 	{}
1317 };
1318 
1319 static const struct intel_dpll_mgr hsw_pll_mgr = {
1320 	.dpll_info = hsw_plls,
1321 	.compute_dplls = hsw_compute_dpll,
1322 	.get_dplls = hsw_get_dpll,
1323 	.put_dplls = intel_put_dpll,
1324 	.update_ref_clks = hsw_update_dpll_ref_clks,
1325 	.dump_hw_state = hsw_dump_hw_state,
1326 	.compare_hw_state = hsw_compare_hw_state,
1327 };
1328 
1329 struct skl_dpll_regs {
1330 	i915_reg_t ctl, cfgcr1, cfgcr2;
1331 };
1332 
1333 /* this array is indexed by the *shared* pll id */
1334 static const struct skl_dpll_regs skl_dpll_regs[4] = {
1335 	{
1336 		/* DPLL 0 */
1337 		.ctl = LCPLL1_CTL,
1338 		/* DPLL 0 doesn't support HDMI mode */
1339 	},
1340 	{
1341 		/* DPLL 1 */
1342 		.ctl = LCPLL2_CTL,
1343 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
1344 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
1345 	},
1346 	{
1347 		/* DPLL 2 */
1348 		.ctl = WRPLL_CTL(0),
1349 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
1350 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
1351 	},
1352 	{
1353 		/* DPLL 3 */
1354 		.ctl = WRPLL_CTL(1),
1355 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
1356 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
1357 	},
1358 };
1359 
1360 static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *i915,
1361 				    struct intel_shared_dpll *pll,
1362 				    const struct skl_dpll_hw_state *hw_state)
1363 {
1364 	const enum intel_dpll_id id = pll->info->id;
1365 
1366 	intel_de_rmw(i915, DPLL_CTRL1,
1367 		     DPLL_CTRL1_HDMI_MODE(id) |
1368 		     DPLL_CTRL1_SSC(id) |
1369 		     DPLL_CTRL1_LINK_RATE_MASK(id),
1370 		     hw_state->ctrl1 << (id * 6));
1371 	intel_de_posting_read(i915, DPLL_CTRL1);
1372 }
1373 
1374 static void skl_ddi_pll_enable(struct drm_i915_private *i915,
1375 			       struct intel_shared_dpll *pll,
1376 			       const struct intel_dpll_hw_state *dpll_hw_state)
1377 {
1378 	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1379 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1380 	const enum intel_dpll_id id = pll->info->id;
1381 
1382 	skl_ddi_pll_write_ctrl1(i915, pll, hw_state);
1383 
1384 	intel_de_write(i915, regs[id].cfgcr1, hw_state->cfgcr1);
1385 	intel_de_write(i915, regs[id].cfgcr2, hw_state->cfgcr2);
1386 	intel_de_posting_read(i915, regs[id].cfgcr1);
1387 	intel_de_posting_read(i915, regs[id].cfgcr2);
1388 
1389 	/* the enable bit is always bit 31 */
1390 	intel_de_rmw(i915, regs[id].ctl, 0, LCPLL_PLL_ENABLE);
1391 
1392 	if (intel_de_wait_for_set(i915, DPLL_STATUS, DPLL_LOCK(id), 5))
1393 		drm_err(&i915->drm, "DPLL %d not locked\n", id);
1394 }
1395 
1396 static void skl_ddi_dpll0_enable(struct drm_i915_private *i915,
1397 				 struct intel_shared_dpll *pll,
1398 				 const struct intel_dpll_hw_state *dpll_hw_state)
1399 {
1400 	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1401 
1402 	skl_ddi_pll_write_ctrl1(i915, pll, hw_state);
1403 }
1404 
1405 static void skl_ddi_pll_disable(struct drm_i915_private *i915,
1406 				struct intel_shared_dpll *pll)
1407 {
1408 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1409 	const enum intel_dpll_id id = pll->info->id;
1410 
1411 	/* the enable bit is always bit 31 */
1412 	intel_de_rmw(i915, regs[id].ctl, LCPLL_PLL_ENABLE, 0);
1413 	intel_de_posting_read(i915, regs[id].ctl);
1414 }
1415 
1416 static void skl_ddi_dpll0_disable(struct drm_i915_private *i915,
1417 				  struct intel_shared_dpll *pll)
1418 {
1419 }
1420 
1421 static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *i915,
1422 				     struct intel_shared_dpll *pll,
1423 				     struct intel_dpll_hw_state *dpll_hw_state)
1424 {
1425 	struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1426 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1427 	const enum intel_dpll_id id = pll->info->id;
1428 	intel_wakeref_t wakeref;
1429 	bool ret;
1430 	u32 val;
1431 
1432 	wakeref = intel_display_power_get_if_enabled(i915,
1433 						     POWER_DOMAIN_DISPLAY_CORE);
1434 	if (!wakeref)
1435 		return false;
1436 
1437 	ret = false;
1438 
1439 	val = intel_de_read(i915, regs[id].ctl);
1440 	if (!(val & LCPLL_PLL_ENABLE))
1441 		goto out;
1442 
1443 	val = intel_de_read(i915, DPLL_CTRL1);
1444 	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1445 
1446 	/* avoid reading back stale values if HDMI mode is not enabled */
1447 	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
1448 		hw_state->cfgcr1 = intel_de_read(i915, regs[id].cfgcr1);
1449 		hw_state->cfgcr2 = intel_de_read(i915, regs[id].cfgcr2);
1450 	}
1451 	ret = true;
1452 
1453 out:
1454 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1455 
1456 	return ret;
1457 }
1458 
1459 static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *i915,
1460 				       struct intel_shared_dpll *pll,
1461 				       struct intel_dpll_hw_state *dpll_hw_state)
1462 {
1463 	struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1464 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1465 	const enum intel_dpll_id id = pll->info->id;
1466 	intel_wakeref_t wakeref;
1467 	u32 val;
1468 	bool ret;
1469 
1470 	wakeref = intel_display_power_get_if_enabled(i915,
1471 						     POWER_DOMAIN_DISPLAY_CORE);
1472 	if (!wakeref)
1473 		return false;
1474 
1475 	ret = false;
1476 
1477 	/* DPLL0 is always enabled since it drives CDCLK */
1478 	val = intel_de_read(i915, regs[id].ctl);
1479 	if (drm_WARN_ON(&i915->drm, !(val & LCPLL_PLL_ENABLE)))
1480 		goto out;
1481 
1482 	val = intel_de_read(i915, DPLL_CTRL1);
1483 	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1484 
1485 	ret = true;
1486 
1487 out:
1488 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1489 
1490 	return ret;
1491 }
1492 
1493 struct skl_wrpll_context {
1494 	u64 min_deviation;		/* current minimal deviation */
1495 	u64 central_freq;		/* chosen central freq */
1496 	u64 dco_freq;			/* chosen dco freq */
1497 	unsigned int p;			/* chosen divider */
1498 };
1499 
1500 /* DCO freq must be within +1%/-6%  of the DCO central freq */
1501 #define SKL_DCO_MAX_PDEVIATION	100
1502 #define SKL_DCO_MAX_NDEVIATION	600
1503 
1504 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1505 				  u64 central_freq,
1506 				  u64 dco_freq,
1507 				  unsigned int divider)
1508 {
1509 	u64 deviation;
1510 
1511 	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1512 			      central_freq);
1513 
1514 	/* positive deviation */
1515 	if (dco_freq >= central_freq) {
1516 		if (deviation < SKL_DCO_MAX_PDEVIATION &&
1517 		    deviation < ctx->min_deviation) {
1518 			ctx->min_deviation = deviation;
1519 			ctx->central_freq = central_freq;
1520 			ctx->dco_freq = dco_freq;
1521 			ctx->p = divider;
1522 		}
1523 	/* negative deviation */
1524 	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1525 		   deviation < ctx->min_deviation) {
1526 		ctx->min_deviation = deviation;
1527 		ctx->central_freq = central_freq;
1528 		ctx->dco_freq = dco_freq;
1529 		ctx->p = divider;
1530 	}
1531 }
1532 
1533 static void skl_wrpll_get_multipliers(unsigned int p,
1534 				      unsigned int *p0 /* out */,
1535 				      unsigned int *p1 /* out */,
1536 				      unsigned int *p2 /* out */)
1537 {
1538 	/* even dividers */
1539 	if (p % 2 == 0) {
1540 		unsigned int half = p / 2;
1541 
1542 		if (half == 1 || half == 2 || half == 3 || half == 5) {
1543 			*p0 = 2;
1544 			*p1 = 1;
1545 			*p2 = half;
1546 		} else if (half % 2 == 0) {
1547 			*p0 = 2;
1548 			*p1 = half / 2;
1549 			*p2 = 2;
1550 		} else if (half % 3 == 0) {
1551 			*p0 = 3;
1552 			*p1 = half / 3;
1553 			*p2 = 2;
1554 		} else if (half % 7 == 0) {
1555 			*p0 = 7;
1556 			*p1 = half / 7;
1557 			*p2 = 2;
1558 		}
1559 	} else if (p == 3 || p == 9) {  /* 3, 5, 7, 9, 15, 21, 35 */
1560 		*p0 = 3;
1561 		*p1 = 1;
1562 		*p2 = p / 3;
1563 	} else if (p == 5 || p == 7) {
1564 		*p0 = p;
1565 		*p1 = 1;
1566 		*p2 = 1;
1567 	} else if (p == 15) {
1568 		*p0 = 3;
1569 		*p1 = 1;
1570 		*p2 = 5;
1571 	} else if (p == 21) {
1572 		*p0 = 7;
1573 		*p1 = 1;
1574 		*p2 = 3;
1575 	} else if (p == 35) {
1576 		*p0 = 7;
1577 		*p1 = 1;
1578 		*p2 = 5;
1579 	}
1580 }
1581 
1582 struct skl_wrpll_params {
1583 	u32 dco_fraction;
1584 	u32 dco_integer;
1585 	u32 qdiv_ratio;
1586 	u32 qdiv_mode;
1587 	u32 kdiv;
1588 	u32 pdiv;
1589 	u32 central_freq;
1590 };
1591 
1592 static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
1593 				      u64 afe_clock,
1594 				      int ref_clock,
1595 				      u64 central_freq,
1596 				      u32 p0, u32 p1, u32 p2)
1597 {
1598 	u64 dco_freq;
1599 
1600 	switch (central_freq) {
1601 	case 9600000000ULL:
1602 		params->central_freq = 0;
1603 		break;
1604 	case 9000000000ULL:
1605 		params->central_freq = 1;
1606 		break;
1607 	case 8400000000ULL:
1608 		params->central_freq = 3;
1609 	}
1610 
1611 	switch (p0) {
1612 	case 1:
1613 		params->pdiv = 0;
1614 		break;
1615 	case 2:
1616 		params->pdiv = 1;
1617 		break;
1618 	case 3:
1619 		params->pdiv = 2;
1620 		break;
1621 	case 7:
1622 		params->pdiv = 4;
1623 		break;
1624 	default:
1625 		WARN(1, "Incorrect PDiv\n");
1626 	}
1627 
1628 	switch (p2) {
1629 	case 5:
1630 		params->kdiv = 0;
1631 		break;
1632 	case 2:
1633 		params->kdiv = 1;
1634 		break;
1635 	case 3:
1636 		params->kdiv = 2;
1637 		break;
1638 	case 1:
1639 		params->kdiv = 3;
1640 		break;
1641 	default:
1642 		WARN(1, "Incorrect KDiv\n");
1643 	}
1644 
1645 	params->qdiv_ratio = p1;
1646 	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
1647 
1648 	dco_freq = p0 * p1 * p2 * afe_clock;
1649 
1650 	/*
1651 	 * Intermediate values are in Hz.
1652 	 * Divide by MHz to match bsepc
1653 	 */
1654 	params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
1655 	params->dco_fraction =
1656 		div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
1657 			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
1658 }
1659 
1660 static int
1661 skl_ddi_calculate_wrpll(int clock /* in Hz */,
1662 			int ref_clock,
1663 			struct skl_wrpll_params *wrpll_params)
1664 {
1665 	static const u64 dco_central_freq[3] = { 8400000000ULL,
1666 						 9000000000ULL,
1667 						 9600000000ULL };
1668 	static const u8 even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
1669 					    24, 28, 30, 32, 36, 40, 42, 44,
1670 					    48, 52, 54, 56, 60, 64, 66, 68,
1671 					    70, 72, 76, 78, 80, 84, 88, 90,
1672 					    92, 96, 98 };
1673 	static const u8 odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
1674 	static const struct {
1675 		const u8 *list;
1676 		int n_dividers;
1677 	} dividers[] = {
1678 		{ even_dividers, ARRAY_SIZE(even_dividers) },
1679 		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
1680 	};
1681 	struct skl_wrpll_context ctx = {
1682 		.min_deviation = U64_MAX,
1683 	};
1684 	unsigned int dco, d, i;
1685 	unsigned int p0, p1, p2;
1686 	u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
1687 
1688 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
1689 		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
1690 			for (i = 0; i < dividers[d].n_dividers; i++) {
1691 				unsigned int p = dividers[d].list[i];
1692 				u64 dco_freq = p * afe_clock;
1693 
1694 				skl_wrpll_try_divider(&ctx,
1695 						      dco_central_freq[dco],
1696 						      dco_freq,
1697 						      p);
1698 				/*
1699 				 * Skip the remaining dividers if we're sure to
1700 				 * have found the definitive divider, we can't
1701 				 * improve a 0 deviation.
1702 				 */
1703 				if (ctx.min_deviation == 0)
1704 					goto skip_remaining_dividers;
1705 			}
1706 		}
1707 
1708 skip_remaining_dividers:
1709 		/*
1710 		 * If a solution is found with an even divider, prefer
1711 		 * this one.
1712 		 */
1713 		if (d == 0 && ctx.p)
1714 			break;
1715 	}
1716 
1717 	if (!ctx.p)
1718 		return -EINVAL;
1719 
1720 	/*
1721 	 * gcc incorrectly analyses that these can be used without being
1722 	 * initialized. To be fair, it's hard to guess.
1723 	 */
1724 	p0 = p1 = p2 = 0;
1725 	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
1726 	skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
1727 				  ctx.central_freq, p0, p1, p2);
1728 
1729 	return 0;
1730 }
1731 
1732 static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
1733 				  const struct intel_shared_dpll *pll,
1734 				  const struct intel_dpll_hw_state *dpll_hw_state)
1735 {
1736 	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1737 	int ref_clock = i915->display.dpll.ref_clks.nssc;
1738 	u32 p0, p1, p2, dco_freq;
1739 
1740 	p0 = hw_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
1741 	p2 = hw_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;
1742 
1743 	if (hw_state->cfgcr2 &  DPLL_CFGCR2_QDIV_MODE(1))
1744 		p1 = (hw_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
1745 	else
1746 		p1 = 1;
1747 
1748 
1749 	switch (p0) {
1750 	case DPLL_CFGCR2_PDIV_1:
1751 		p0 = 1;
1752 		break;
1753 	case DPLL_CFGCR2_PDIV_2:
1754 		p0 = 2;
1755 		break;
1756 	case DPLL_CFGCR2_PDIV_3:
1757 		p0 = 3;
1758 		break;
1759 	case DPLL_CFGCR2_PDIV_7_INVALID:
1760 		/*
1761 		 * Incorrect ASUS-Z170M BIOS setting, the HW seems to ignore bit#0,
1762 		 * handling it the same way as PDIV_7.
1763 		 */
1764 		drm_dbg_kms(&i915->drm, "Invalid WRPLL PDIV divider value, fixing it.\n");
1765 		fallthrough;
1766 	case DPLL_CFGCR2_PDIV_7:
1767 		p0 = 7;
1768 		break;
1769 	default:
1770 		MISSING_CASE(p0);
1771 		return 0;
1772 	}
1773 
1774 	switch (p2) {
1775 	case DPLL_CFGCR2_KDIV_5:
1776 		p2 = 5;
1777 		break;
1778 	case DPLL_CFGCR2_KDIV_2:
1779 		p2 = 2;
1780 		break;
1781 	case DPLL_CFGCR2_KDIV_3:
1782 		p2 = 3;
1783 		break;
1784 	case DPLL_CFGCR2_KDIV_1:
1785 		p2 = 1;
1786 		break;
1787 	default:
1788 		MISSING_CASE(p2);
1789 		return 0;
1790 	}
1791 
1792 	dco_freq = (hw_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
1793 		   ref_clock;
1794 
1795 	dco_freq += ((hw_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
1796 		    ref_clock / 0x8000;
1797 
1798 	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
1799 		return 0;
1800 
1801 	return dco_freq / (p0 * p1 * p2 * 5);
1802 }
1803 
1804 static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
1805 {
1806 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1807 	struct skl_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.skl;
1808 	struct skl_wrpll_params wrpll_params = {};
1809 	int ret;
1810 
1811 	ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
1812 				      i915->display.dpll.ref_clks.nssc, &wrpll_params);
1813 	if (ret)
1814 		return ret;
1815 
1816 	/*
1817 	 * See comment in intel_dpll_hw_state to understand why we always use 0
1818 	 * as the DPLL id in this function.
1819 	 */
1820 	hw_state->ctrl1 =
1821 		DPLL_CTRL1_OVERRIDE(0) |
1822 		DPLL_CTRL1_HDMI_MODE(0);
1823 
1824 	hw_state->cfgcr1 =
1825 		DPLL_CFGCR1_FREQ_ENABLE |
1826 		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
1827 		wrpll_params.dco_integer;
1828 
1829 	hw_state->cfgcr2 =
1830 		DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
1831 		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
1832 		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
1833 		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
1834 		wrpll_params.central_freq;
1835 
1836 	crtc_state->port_clock = skl_ddi_wrpll_get_freq(i915, NULL,
1837 							&crtc_state->dpll_hw_state);
1838 
1839 	return 0;
1840 }
1841 
1842 static int
1843 skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1844 {
1845 	struct skl_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.skl;
1846 	u32 ctrl1;
1847 
1848 	/*
1849 	 * See comment in intel_dpll_hw_state to understand why we always use 0
1850 	 * as the DPLL id in this function.
1851 	 */
1852 	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1853 	switch (crtc_state->port_clock / 2) {
1854 	case 81000:
1855 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
1856 		break;
1857 	case 135000:
1858 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
1859 		break;
1860 	case 270000:
1861 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
1862 		break;
1863 		/* eDP 1.4 rates */
1864 	case 162000:
1865 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
1866 		break;
1867 	case 108000:
1868 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
1869 		break;
1870 	case 216000:
1871 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
1872 		break;
1873 	}
1874 
1875 	hw_state->ctrl1 = ctrl1;
1876 
1877 	return 0;
1878 }
1879 
1880 static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1881 				  const struct intel_shared_dpll *pll,
1882 				  const struct intel_dpll_hw_state *dpll_hw_state)
1883 {
1884 	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1885 	int link_clock = 0;
1886 
1887 	switch ((hw_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
1888 		DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
1889 	case DPLL_CTRL1_LINK_RATE_810:
1890 		link_clock = 81000;
1891 		break;
1892 	case DPLL_CTRL1_LINK_RATE_1080:
1893 		link_clock = 108000;
1894 		break;
1895 	case DPLL_CTRL1_LINK_RATE_1350:
1896 		link_clock = 135000;
1897 		break;
1898 	case DPLL_CTRL1_LINK_RATE_1620:
1899 		link_clock = 162000;
1900 		break;
1901 	case DPLL_CTRL1_LINK_RATE_2160:
1902 		link_clock = 216000;
1903 		break;
1904 	case DPLL_CTRL1_LINK_RATE_2700:
1905 		link_clock = 270000;
1906 		break;
1907 	default:
1908 		drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
1909 		break;
1910 	}
1911 
1912 	return link_clock * 2;
1913 }
1914 
1915 static int skl_compute_dpll(struct intel_atomic_state *state,
1916 			    struct intel_crtc *crtc,
1917 			    struct intel_encoder *encoder)
1918 {
1919 	struct intel_crtc_state *crtc_state =
1920 		intel_atomic_get_new_crtc_state(state, crtc);
1921 
1922 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1923 		return skl_ddi_hdmi_pll_dividers(crtc_state);
1924 	else if (intel_crtc_has_dp_encoder(crtc_state))
1925 		return skl_ddi_dp_set_dpll_hw_state(crtc_state);
1926 	else
1927 		return -EINVAL;
1928 }
1929 
1930 static int skl_get_dpll(struct intel_atomic_state *state,
1931 			struct intel_crtc *crtc,
1932 			struct intel_encoder *encoder)
1933 {
1934 	struct intel_crtc_state *crtc_state =
1935 		intel_atomic_get_new_crtc_state(state, crtc);
1936 	struct intel_shared_dpll *pll;
1937 
1938 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1939 		pll = intel_find_shared_dpll(state, crtc,
1940 					     &crtc_state->dpll_hw_state,
1941 					     BIT(DPLL_ID_SKL_DPLL0));
1942 	else
1943 		pll = intel_find_shared_dpll(state, crtc,
1944 					     &crtc_state->dpll_hw_state,
1945 					     BIT(DPLL_ID_SKL_DPLL3) |
1946 					     BIT(DPLL_ID_SKL_DPLL2) |
1947 					     BIT(DPLL_ID_SKL_DPLL1));
1948 	if (!pll)
1949 		return -EINVAL;
1950 
1951 	intel_reference_shared_dpll(state, crtc,
1952 				    pll, &crtc_state->dpll_hw_state);
1953 
1954 	crtc_state->shared_dpll = pll;
1955 
1956 	return 0;
1957 }
1958 
1959 static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
1960 				const struct intel_shared_dpll *pll,
1961 				const struct intel_dpll_hw_state *dpll_hw_state)
1962 {
1963 	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1964 
1965 	/*
1966 	 * ctrl1 register is already shifted for each pll, just use 0 to get
1967 	 * the internal shift for each field
1968 	 */
1969 	if (hw_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
1970 		return skl_ddi_wrpll_get_freq(i915, pll, dpll_hw_state);
1971 	else
1972 		return skl_ddi_lcpll_get_freq(i915, pll, dpll_hw_state);
1973 }
1974 
1975 static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
1976 {
1977 	/* No SSC ref */
1978 	i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
1979 }
1980 
1981 static void skl_dump_hw_state(struct drm_printer *p,
1982 			      const struct intel_dpll_hw_state *dpll_hw_state)
1983 {
1984 	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1985 
1986 	drm_printf(p, "dpll_hw_state: ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
1987 		   hw_state->ctrl1, hw_state->cfgcr1, hw_state->cfgcr2);
1988 }
1989 
1990 static bool skl_compare_hw_state(const struct intel_dpll_hw_state *_a,
1991 				 const struct intel_dpll_hw_state *_b)
1992 {
1993 	const struct skl_dpll_hw_state *a = &_a->skl;
1994 	const struct skl_dpll_hw_state *b = &_b->skl;
1995 
1996 	return a->ctrl1 == b->ctrl1 &&
1997 		a->cfgcr1 == b->cfgcr1 &&
1998 		a->cfgcr2 == b->cfgcr2;
1999 }
2000 
2001 static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
2002 	.enable = skl_ddi_pll_enable,
2003 	.disable = skl_ddi_pll_disable,
2004 	.get_hw_state = skl_ddi_pll_get_hw_state,
2005 	.get_freq = skl_ddi_pll_get_freq,
2006 };
2007 
2008 static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
2009 	.enable = skl_ddi_dpll0_enable,
2010 	.disable = skl_ddi_dpll0_disable,
2011 	.get_hw_state = skl_ddi_dpll0_get_hw_state,
2012 	.get_freq = skl_ddi_pll_get_freq,
2013 };
2014 
2015 static const struct dpll_info skl_plls[] = {
2016 	{ .name = "DPLL 0", .funcs = &skl_ddi_dpll0_funcs, .id = DPLL_ID_SKL_DPLL0,
2017 	  .always_on = true, },
2018 	{ .name = "DPLL 1", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL1, },
2019 	{ .name = "DPLL 2", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL2, },
2020 	{ .name = "DPLL 3", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL3, },
2021 	{}
2022 };
2023 
2024 static const struct intel_dpll_mgr skl_pll_mgr = {
2025 	.dpll_info = skl_plls,
2026 	.compute_dplls = skl_compute_dpll,
2027 	.get_dplls = skl_get_dpll,
2028 	.put_dplls = intel_put_dpll,
2029 	.update_ref_clks = skl_update_dpll_ref_clks,
2030 	.dump_hw_state = skl_dump_hw_state,
2031 	.compare_hw_state = skl_compare_hw_state,
2032 };
2033 
2034 static void bxt_ddi_pll_enable(struct drm_i915_private *i915,
2035 			       struct intel_shared_dpll *pll,
2036 			       const struct intel_dpll_hw_state *dpll_hw_state)
2037 {
2038 	const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
2039 	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
2040 	enum dpio_phy phy;
2041 	enum dpio_channel ch;
2042 	u32 temp;
2043 
2044 	bxt_port_to_phy_channel(i915, port, &phy, &ch);
2045 
2046 	/* Non-SSC reference */
2047 	intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_REF_SEL);
2048 
2049 	if (IS_GEMINILAKE(i915)) {
2050 		intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port),
2051 			     0, PORT_PLL_POWER_ENABLE);
2052 
2053 		if (wait_for_us((intel_de_read(i915, BXT_PORT_PLL_ENABLE(port)) &
2054 				 PORT_PLL_POWER_STATE), 200))
2055 			drm_err(&i915->drm,
2056 				"Power state not set for PLL:%d\n", port);
2057 	}
2058 
2059 	/* Disable 10 bit clock */
2060 	intel_de_rmw(i915, BXT_PORT_PLL_EBB_4(phy, ch),
2061 		     PORT_PLL_10BIT_CLK_ENABLE, 0);
2062 
2063 	/* Write P1 & P2 */
2064 	intel_de_rmw(i915, BXT_PORT_PLL_EBB_0(phy, ch),
2065 		     PORT_PLL_P1_MASK | PORT_PLL_P2_MASK, hw_state->ebb0);
2066 
2067 	/* Write M2 integer */
2068 	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 0),
2069 		     PORT_PLL_M2_INT_MASK, hw_state->pll0);
2070 
2071 	/* Write N */
2072 	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 1),
2073 		     PORT_PLL_N_MASK, hw_state->pll1);
2074 
2075 	/* Write M2 fraction */
2076 	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 2),
2077 		     PORT_PLL_M2_FRAC_MASK, hw_state->pll2);
2078 
2079 	/* Write M2 fraction enable */
2080 	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 3),
2081 		     PORT_PLL_M2_FRAC_ENABLE, hw_state->pll3);
2082 
2083 	/* Write coeff */
2084 	temp = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 6));
2085 	temp &= ~PORT_PLL_PROP_COEFF_MASK;
2086 	temp &= ~PORT_PLL_INT_COEFF_MASK;
2087 	temp &= ~PORT_PLL_GAIN_CTL_MASK;
2088 	temp |= hw_state->pll6;
2089 	intel_de_write(i915, BXT_PORT_PLL(phy, ch, 6), temp);
2090 
2091 	/* Write calibration val */
2092 	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 8),
2093 		     PORT_PLL_TARGET_CNT_MASK, hw_state->pll8);
2094 
2095 	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 9),
2096 		     PORT_PLL_LOCK_THRESHOLD_MASK, hw_state->pll9);
2097 
2098 	temp = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 10));
2099 	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
2100 	temp &= ~PORT_PLL_DCO_AMP_MASK;
2101 	temp |= hw_state->pll10;
2102 	intel_de_write(i915, BXT_PORT_PLL(phy, ch, 10), temp);
2103 
2104 	/* Recalibrate with new settings */
2105 	temp = intel_de_read(i915, BXT_PORT_PLL_EBB_4(phy, ch));
2106 	temp |= PORT_PLL_RECALIBRATE;
2107 	intel_de_write(i915, BXT_PORT_PLL_EBB_4(phy, ch), temp);
2108 	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
2109 	temp |= hw_state->ebb4;
2110 	intel_de_write(i915, BXT_PORT_PLL_EBB_4(phy, ch), temp);
2111 
2112 	/* Enable PLL */
2113 	intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_ENABLE);
2114 	intel_de_posting_read(i915, BXT_PORT_PLL_ENABLE(port));
2115 
2116 	if (wait_for_us((intel_de_read(i915, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
2117 			200))
2118 		drm_err(&i915->drm, "PLL %d not locked\n", port);
2119 
2120 	if (IS_GEMINILAKE(i915)) {
2121 		temp = intel_de_read(i915, BXT_PORT_TX_DW5_LN(phy, ch, 0));
2122 		temp |= DCC_DELAY_RANGE_2;
2123 		intel_de_write(i915, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
2124 	}
2125 
2126 	/*
2127 	 * While we write to the group register to program all lanes at once we
2128 	 * can read only lane registers and we pick lanes 0/1 for that.
2129 	 */
2130 	temp = intel_de_read(i915, BXT_PORT_PCS_DW12_LN01(phy, ch));
2131 	temp &= ~LANE_STAGGER_MASK;
2132 	temp &= ~LANESTAGGER_STRAP_OVRD;
2133 	temp |= hw_state->pcsdw12;
2134 	intel_de_write(i915, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
2135 }
2136 
2137 static void bxt_ddi_pll_disable(struct drm_i915_private *i915,
2138 				struct intel_shared_dpll *pll)
2139 {
2140 	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
2141 
2142 	intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), PORT_PLL_ENABLE, 0);
2143 	intel_de_posting_read(i915, BXT_PORT_PLL_ENABLE(port));
2144 
2145 	if (IS_GEMINILAKE(i915)) {
2146 		intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port),
2147 			     PORT_PLL_POWER_ENABLE, 0);
2148 
2149 		if (wait_for_us(!(intel_de_read(i915, BXT_PORT_PLL_ENABLE(port)) &
2150 				  PORT_PLL_POWER_STATE), 200))
2151 			drm_err(&i915->drm,
2152 				"Power state not reset for PLL:%d\n", port);
2153 	}
2154 }
2155 
2156 static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *i915,
2157 				     struct intel_shared_dpll *pll,
2158 				     struct intel_dpll_hw_state *dpll_hw_state)
2159 {
2160 	struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
2161 	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
2162 	intel_wakeref_t wakeref;
2163 	enum dpio_phy phy;
2164 	enum dpio_channel ch;
2165 	u32 val;
2166 	bool ret;
2167 
2168 	bxt_port_to_phy_channel(i915, port, &phy, &ch);
2169 
2170 	wakeref = intel_display_power_get_if_enabled(i915,
2171 						     POWER_DOMAIN_DISPLAY_CORE);
2172 	if (!wakeref)
2173 		return false;
2174 
2175 	ret = false;
2176 
2177 	val = intel_de_read(i915, BXT_PORT_PLL_ENABLE(port));
2178 	if (!(val & PORT_PLL_ENABLE))
2179 		goto out;
2180 
2181 	hw_state->ebb0 = intel_de_read(i915, BXT_PORT_PLL_EBB_0(phy, ch));
2182 	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
2183 
2184 	hw_state->ebb4 = intel_de_read(i915, BXT_PORT_PLL_EBB_4(phy, ch));
2185 	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
2186 
2187 	hw_state->pll0 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 0));
2188 	hw_state->pll0 &= PORT_PLL_M2_INT_MASK;
2189 
2190 	hw_state->pll1 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 1));
2191 	hw_state->pll1 &= PORT_PLL_N_MASK;
2192 
2193 	hw_state->pll2 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 2));
2194 	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
2195 
2196 	hw_state->pll3 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 3));
2197 	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
2198 
2199 	hw_state->pll6 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 6));
2200 	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
2201 			  PORT_PLL_INT_COEFF_MASK |
2202 			  PORT_PLL_GAIN_CTL_MASK;
2203 
2204 	hw_state->pll8 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 8));
2205 	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
2206 
2207 	hw_state->pll9 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 9));
2208 	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
2209 
2210 	hw_state->pll10 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 10));
2211 	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
2212 			   PORT_PLL_DCO_AMP_MASK;
2213 
2214 	/*
2215 	 * While we write to the group register to program all lanes at once we
2216 	 * can read only lane registers. We configure all lanes the same way, so
2217 	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
2218 	 */
2219 	hw_state->pcsdw12 = intel_de_read(i915,
2220 					  BXT_PORT_PCS_DW12_LN01(phy, ch));
2221 	if (intel_de_read(i915, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
2222 		drm_dbg(&i915->drm,
2223 			"lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
2224 			hw_state->pcsdw12,
2225 			intel_de_read(i915,
2226 				      BXT_PORT_PCS_DW12_LN23(phy, ch)));
2227 	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
2228 
2229 	ret = true;
2230 
2231 out:
2232 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
2233 
2234 	return ret;
2235 }
2236 
2237 /* pre-calculated values for DP linkrates */
2238 static const struct dpll bxt_dp_clk_val[] = {
2239 	/* m2 is .22 binary fixed point */
2240 	{ .dot = 162000, .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2241 	{ .dot = 270000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
2242 	{ .dot = 540000, .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
2243 	{ .dot = 216000, .p1 = 3, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2244 	{ .dot = 243000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6133333 /* 24.3 */ },
2245 	{ .dot = 324000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2246 	{ .dot = 432000, .p1 = 3, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2247 };
2248 
2249 static int
2250 bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
2251 			  struct dpll *clk_div)
2252 {
2253 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2254 
2255 	/* Calculate HDMI div */
2256 	/*
2257 	 * FIXME: tie the following calculation into
2258 	 * i9xx_crtc_compute_clock
2259 	 */
2260 	if (!bxt_find_best_dpll(crtc_state, clk_div))
2261 		return -EINVAL;
2262 
2263 	drm_WARN_ON(&i915->drm, clk_div->m1 != 2);
2264 
2265 	return 0;
2266 }
2267 
2268 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
2269 				    struct dpll *clk_div)
2270 {
2271 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2272 	int i;
2273 
2274 	*clk_div = bxt_dp_clk_val[0];
2275 	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
2276 		if (crtc_state->port_clock == bxt_dp_clk_val[i].dot) {
2277 			*clk_div = bxt_dp_clk_val[i];
2278 			break;
2279 		}
2280 	}
2281 
2282 	chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, clk_div);
2283 
2284 	drm_WARN_ON(&i915->drm, clk_div->vco == 0 ||
2285 		    clk_div->dot != crtc_state->port_clock);
2286 }
2287 
2288 static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
2289 				     const struct dpll *clk_div)
2290 {
2291 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2292 	struct bxt_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.bxt;
2293 	int clock = crtc_state->port_clock;
2294 	int vco = clk_div->vco;
2295 	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
2296 	u32 lanestagger;
2297 
2298 	if (vco >= 6200000 && vco <= 6700000) {
2299 		prop_coef = 4;
2300 		int_coef = 9;
2301 		gain_ctl = 3;
2302 		targ_cnt = 8;
2303 	} else if ((vco > 5400000 && vco < 6200000) ||
2304 			(vco >= 4800000 && vco < 5400000)) {
2305 		prop_coef = 5;
2306 		int_coef = 11;
2307 		gain_ctl = 3;
2308 		targ_cnt = 9;
2309 	} else if (vco == 5400000) {
2310 		prop_coef = 3;
2311 		int_coef = 8;
2312 		gain_ctl = 1;
2313 		targ_cnt = 9;
2314 	} else {
2315 		drm_err(&i915->drm, "Invalid VCO\n");
2316 		return -EINVAL;
2317 	}
2318 
2319 	if (clock > 270000)
2320 		lanestagger = 0x18;
2321 	else if (clock > 135000)
2322 		lanestagger = 0x0d;
2323 	else if (clock > 67000)
2324 		lanestagger = 0x07;
2325 	else if (clock > 33000)
2326 		lanestagger = 0x04;
2327 	else
2328 		lanestagger = 0x02;
2329 
2330 	hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
2331 	hw_state->pll0 = PORT_PLL_M2_INT(clk_div->m2 >> 22);
2332 	hw_state->pll1 = PORT_PLL_N(clk_div->n);
2333 	hw_state->pll2 = PORT_PLL_M2_FRAC(clk_div->m2 & 0x3fffff);
2334 
2335 	if (clk_div->m2 & 0x3fffff)
2336 		hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
2337 
2338 	hw_state->pll6 = PORT_PLL_PROP_COEFF(prop_coef) |
2339 		PORT_PLL_INT_COEFF(int_coef) |
2340 		PORT_PLL_GAIN_CTL(gain_ctl);
2341 
2342 	hw_state->pll8 = PORT_PLL_TARGET_CNT(targ_cnt);
2343 
2344 	hw_state->pll9 = PORT_PLL_LOCK_THRESHOLD(5);
2345 
2346 	hw_state->pll10 = PORT_PLL_DCO_AMP(15) |
2347 		PORT_PLL_DCO_AMP_OVR_EN_H;
2348 
2349 	hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
2350 
2351 	hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
2352 
2353 	return 0;
2354 }
2355 
2356 static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
2357 				const struct intel_shared_dpll *pll,
2358 				const struct intel_dpll_hw_state *dpll_hw_state)
2359 {
2360 	const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
2361 	struct dpll clock;
2362 
2363 	clock.m1 = 2;
2364 	clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, hw_state->pll0) << 22;
2365 	if (hw_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
2366 		clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK,
2367 					  hw_state->pll2);
2368 	clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, hw_state->pll1);
2369 	clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, hw_state->ebb0);
2370 	clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, hw_state->ebb0);
2371 
2372 	return chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, &clock);
2373 }
2374 
2375 static int
2376 bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2377 {
2378 	struct dpll clk_div = {};
2379 
2380 	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
2381 
2382 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2383 }
2384 
2385 static int
2386 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2387 {
2388 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2389 	struct dpll clk_div = {};
2390 	int ret;
2391 
2392 	bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
2393 
2394 	ret = bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2395 	if (ret)
2396 		return ret;
2397 
2398 	crtc_state->port_clock = bxt_ddi_pll_get_freq(i915, NULL,
2399 						      &crtc_state->dpll_hw_state);
2400 
2401 	return 0;
2402 }
2403 
2404 static int bxt_compute_dpll(struct intel_atomic_state *state,
2405 			    struct intel_crtc *crtc,
2406 			    struct intel_encoder *encoder)
2407 {
2408 	struct intel_crtc_state *crtc_state =
2409 		intel_atomic_get_new_crtc_state(state, crtc);
2410 
2411 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
2412 		return bxt_ddi_hdmi_set_dpll_hw_state(crtc_state);
2413 	else if (intel_crtc_has_dp_encoder(crtc_state))
2414 		return bxt_ddi_dp_set_dpll_hw_state(crtc_state);
2415 	else
2416 		return -EINVAL;
2417 }
2418 
2419 static int bxt_get_dpll(struct intel_atomic_state *state,
2420 			struct intel_crtc *crtc,
2421 			struct intel_encoder *encoder)
2422 {
2423 	struct intel_crtc_state *crtc_state =
2424 		intel_atomic_get_new_crtc_state(state, crtc);
2425 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2426 	struct intel_shared_dpll *pll;
2427 	enum intel_dpll_id id;
2428 
2429 	/* 1:1 mapping between ports and PLLs */
2430 	id = (enum intel_dpll_id) encoder->port;
2431 	pll = intel_get_shared_dpll_by_id(i915, id);
2432 
2433 	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
2434 		    crtc->base.base.id, crtc->base.name, pll->info->name);
2435 
2436 	intel_reference_shared_dpll(state, crtc,
2437 				    pll, &crtc_state->dpll_hw_state);
2438 
2439 	crtc_state->shared_dpll = pll;
2440 
2441 	return 0;
2442 }
2443 
2444 static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
2445 {
2446 	i915->display.dpll.ref_clks.ssc = 100000;
2447 	i915->display.dpll.ref_clks.nssc = 100000;
2448 	/* DSI non-SSC ref 19.2MHz */
2449 }
2450 
2451 static void bxt_dump_hw_state(struct drm_printer *p,
2452 			      const struct intel_dpll_hw_state *dpll_hw_state)
2453 {
2454 	const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
2455 
2456 	drm_printf(p, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
2457 		   "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
2458 		   "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
2459 		   hw_state->ebb0, hw_state->ebb4,
2460 		   hw_state->pll0, hw_state->pll1, hw_state->pll2, hw_state->pll3,
2461 		   hw_state->pll6, hw_state->pll8, hw_state->pll9, hw_state->pll10,
2462 		   hw_state->pcsdw12);
2463 }
2464 
2465 static bool bxt_compare_hw_state(const struct intel_dpll_hw_state *_a,
2466 				 const struct intel_dpll_hw_state *_b)
2467 {
2468 	const struct bxt_dpll_hw_state *a = &_a->bxt;
2469 	const struct bxt_dpll_hw_state *b = &_b->bxt;
2470 
2471 	return a->ebb0 == b->ebb0 &&
2472 		a->ebb4 == b->ebb4 &&
2473 		a->pll0 == b->pll0 &&
2474 		a->pll1 == b->pll1 &&
2475 		a->pll2 == b->pll2 &&
2476 		a->pll3 == b->pll3 &&
2477 		a->pll6 == b->pll6 &&
2478 		a->pll8 == b->pll8 &&
2479 		a->pll10 == b->pll10 &&
2480 		a->pcsdw12 == b->pcsdw12;
2481 }
2482 
2483 static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
2484 	.enable = bxt_ddi_pll_enable,
2485 	.disable = bxt_ddi_pll_disable,
2486 	.get_hw_state = bxt_ddi_pll_get_hw_state,
2487 	.get_freq = bxt_ddi_pll_get_freq,
2488 };
2489 
2490 static const struct dpll_info bxt_plls[] = {
2491 	{ .name = "PORT PLL A", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL0, },
2492 	{ .name = "PORT PLL B", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL1, },
2493 	{ .name = "PORT PLL C", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL2, },
2494 	{}
2495 };
2496 
2497 static const struct intel_dpll_mgr bxt_pll_mgr = {
2498 	.dpll_info = bxt_plls,
2499 	.compute_dplls = bxt_compute_dpll,
2500 	.get_dplls = bxt_get_dpll,
2501 	.put_dplls = intel_put_dpll,
2502 	.update_ref_clks = bxt_update_dpll_ref_clks,
2503 	.dump_hw_state = bxt_dump_hw_state,
2504 	.compare_hw_state = bxt_compare_hw_state,
2505 };
2506 
2507 static void icl_wrpll_get_multipliers(int bestdiv, int *pdiv,
2508 				      int *qdiv, int *kdiv)
2509 {
2510 	/* even dividers */
2511 	if (bestdiv % 2 == 0) {
2512 		if (bestdiv == 2) {
2513 			*pdiv = 2;
2514 			*qdiv = 1;
2515 			*kdiv = 1;
2516 		} else if (bestdiv % 4 == 0) {
2517 			*pdiv = 2;
2518 			*qdiv = bestdiv / 4;
2519 			*kdiv = 2;
2520 		} else if (bestdiv % 6 == 0) {
2521 			*pdiv = 3;
2522 			*qdiv = bestdiv / 6;
2523 			*kdiv = 2;
2524 		} else if (bestdiv % 5 == 0) {
2525 			*pdiv = 5;
2526 			*qdiv = bestdiv / 10;
2527 			*kdiv = 2;
2528 		} else if (bestdiv % 14 == 0) {
2529 			*pdiv = 7;
2530 			*qdiv = bestdiv / 14;
2531 			*kdiv = 2;
2532 		}
2533 	} else {
2534 		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
2535 			*pdiv = bestdiv;
2536 			*qdiv = 1;
2537 			*kdiv = 1;
2538 		} else { /* 9, 15, 21 */
2539 			*pdiv = bestdiv / 3;
2540 			*qdiv = 1;
2541 			*kdiv = 3;
2542 		}
2543 	}
2544 }
2545 
2546 static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
2547 				      u32 dco_freq, u32 ref_freq,
2548 				      int pdiv, int qdiv, int kdiv)
2549 {
2550 	u32 dco;
2551 
2552 	switch (kdiv) {
2553 	case 1:
2554 		params->kdiv = 1;
2555 		break;
2556 	case 2:
2557 		params->kdiv = 2;
2558 		break;
2559 	case 3:
2560 		params->kdiv = 4;
2561 		break;
2562 	default:
2563 		WARN(1, "Incorrect KDiv\n");
2564 	}
2565 
2566 	switch (pdiv) {
2567 	case 2:
2568 		params->pdiv = 1;
2569 		break;
2570 	case 3:
2571 		params->pdiv = 2;
2572 		break;
2573 	case 5:
2574 		params->pdiv = 4;
2575 		break;
2576 	case 7:
2577 		params->pdiv = 8;
2578 		break;
2579 	default:
2580 		WARN(1, "Incorrect PDiv\n");
2581 	}
2582 
2583 	WARN_ON(kdiv != 2 && qdiv != 1);
2584 
2585 	params->qdiv_ratio = qdiv;
2586 	params->qdiv_mode = (qdiv == 1) ? 0 : 1;
2587 
2588 	dco = div_u64((u64)dco_freq << 15, ref_freq);
2589 
2590 	params->dco_integer = dco >> 15;
2591 	params->dco_fraction = dco & 0x7fff;
2592 }
2593 
2594 /*
2595  * Display WA #22010492432: ehl, tgl, adl-s, adl-p
2596  * Program half of the nominal DCO divider fraction value.
2597  */
2598 static bool
2599 ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
2600 {
2601 	return ((IS_ELKHARTLAKE(i915) &&
2602 		 IS_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
2603 		 IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) &&
2604 		 i915->display.dpll.ref_clks.nssc == 38400;
2605 }
2606 
2607 struct icl_combo_pll_params {
2608 	int clock;
2609 	struct skl_wrpll_params wrpll;
2610 };
2611 
2612 /*
2613  * These values alrea already adjusted: they're the bits we write to the
2614  * registers, not the logical values.
2615  */
2616 static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
2617 	{ 540000,
2618 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
2619 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2620 	{ 270000,
2621 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
2622 	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2623 	{ 162000,
2624 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
2625 	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2626 	{ 324000,
2627 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
2628 	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2629 	{ 216000,
2630 	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2631 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2632 	{ 432000,
2633 	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2634 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2635 	{ 648000,
2636 	  { .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
2637 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2638 	{ 810000,
2639 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
2640 	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2641 };
2642 
2643 
2644 /* Also used for 38.4 MHz values. */
2645 static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
2646 	{ 540000,
2647 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
2648 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2649 	{ 270000,
2650 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
2651 	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2652 	{ 162000,
2653 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
2654 	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2655 	{ 324000,
2656 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
2657 	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2658 	{ 216000,
2659 	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2660 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2661 	{ 432000,
2662 	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2663 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2664 	{ 648000,
2665 	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
2666 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2667 	{ 810000,
2668 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
2669 	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2670 };
2671 
2672 static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
2673 	.dco_integer = 0x151, .dco_fraction = 0x4000,
2674 	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2675 };
2676 
2677 static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
2678 	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
2679 	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2680 };
2681 
2682 static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
2683 	.dco_integer = 0x54, .dco_fraction = 0x3000,
2684 	/* the following params are unused */
2685 	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
2686 };
2687 
2688 static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
2689 	.dco_integer = 0x43, .dco_fraction = 0x4000,
2690 	/* the following params are unused */
2691 };
2692 
2693 static int icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2694 				 struct skl_wrpll_params *pll_params)
2695 {
2696 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2697 	const struct icl_combo_pll_params *params =
2698 		i915->display.dpll.ref_clks.nssc == 24000 ?
2699 		icl_dp_combo_pll_24MHz_values :
2700 		icl_dp_combo_pll_19_2MHz_values;
2701 	int clock = crtc_state->port_clock;
2702 	int i;
2703 
2704 	for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
2705 		if (clock == params[i].clock) {
2706 			*pll_params = params[i].wrpll;
2707 			return 0;
2708 		}
2709 	}
2710 
2711 	MISSING_CASE(clock);
2712 	return -EINVAL;
2713 }
2714 
2715 static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
2716 			    struct skl_wrpll_params *pll_params)
2717 {
2718 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2719 
2720 	if (DISPLAY_VER(i915) >= 12) {
2721 		switch (i915->display.dpll.ref_clks.nssc) {
2722 		default:
2723 			MISSING_CASE(i915->display.dpll.ref_clks.nssc);
2724 			fallthrough;
2725 		case 19200:
2726 		case 38400:
2727 			*pll_params = tgl_tbt_pll_19_2MHz_values;
2728 			break;
2729 		case 24000:
2730 			*pll_params = tgl_tbt_pll_24MHz_values;
2731 			break;
2732 		}
2733 	} else {
2734 		switch (i915->display.dpll.ref_clks.nssc) {
2735 		default:
2736 			MISSING_CASE(i915->display.dpll.ref_clks.nssc);
2737 			fallthrough;
2738 		case 19200:
2739 		case 38400:
2740 			*pll_params = icl_tbt_pll_19_2MHz_values;
2741 			break;
2742 		case 24000:
2743 			*pll_params = icl_tbt_pll_24MHz_values;
2744 			break;
2745 		}
2746 	}
2747 
2748 	return 0;
2749 }
2750 
2751 static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
2752 				    const struct intel_shared_dpll *pll,
2753 				    const struct intel_dpll_hw_state *dpll_hw_state)
2754 {
2755 	/*
2756 	 * The PLL outputs multiple frequencies at the same time, selection is
2757 	 * made at DDI clock mux level.
2758 	 */
2759 	drm_WARN_ON(&i915->drm, 1);
2760 
2761 	return 0;
2762 }
2763 
2764 static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
2765 {
2766 	int ref_clock = i915->display.dpll.ref_clks.nssc;
2767 
2768 	/*
2769 	 * For ICL+, the spec states: if reference frequency is 38.4,
2770 	 * use 19.2 because the DPLL automatically divides that by 2.
2771 	 */
2772 	if (ref_clock == 38400)
2773 		ref_clock = 19200;
2774 
2775 	return ref_clock;
2776 }
2777 
2778 static int
2779 icl_calc_wrpll(struct intel_crtc_state *crtc_state,
2780 	       struct skl_wrpll_params *wrpll_params)
2781 {
2782 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2783 	int ref_clock = icl_wrpll_ref_clock(i915);
2784 	u32 afe_clock = crtc_state->port_clock * 5;
2785 	u32 dco_min = 7998000;
2786 	u32 dco_max = 10000000;
2787 	u32 dco_mid = (dco_min + dco_max) / 2;
2788 	static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
2789 					 18, 20, 24, 28, 30, 32,  36,  40,
2790 					 42, 44, 48, 50, 52, 54,  56,  60,
2791 					 64, 66, 68, 70, 72, 76,  78,  80,
2792 					 84, 88, 90, 92, 96, 98, 100, 102,
2793 					  3,  5,  7,  9, 15, 21 };
2794 	u32 dco, best_dco = 0, dco_centrality = 0;
2795 	u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2796 	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2797 
2798 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2799 		dco = afe_clock * dividers[d];
2800 
2801 		if (dco <= dco_max && dco >= dco_min) {
2802 			dco_centrality = abs(dco - dco_mid);
2803 
2804 			if (dco_centrality < best_dco_centrality) {
2805 				best_dco_centrality = dco_centrality;
2806 				best_div = dividers[d];
2807 				best_dco = dco;
2808 			}
2809 		}
2810 	}
2811 
2812 	if (best_div == 0)
2813 		return -EINVAL;
2814 
2815 	icl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2816 	icl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
2817 				  pdiv, qdiv, kdiv);
2818 
2819 	return 0;
2820 }
2821 
2822 static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
2823 				      const struct intel_shared_dpll *pll,
2824 				      const struct intel_dpll_hw_state *dpll_hw_state)
2825 {
2826 	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
2827 	int ref_clock = icl_wrpll_ref_clock(i915);
2828 	u32 dco_fraction;
2829 	u32 p0, p1, p2, dco_freq;
2830 
2831 	p0 = hw_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
2832 	p2 = hw_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
2833 
2834 	if (hw_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
2835 		p1 = (hw_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
2836 			DPLL_CFGCR1_QDIV_RATIO_SHIFT;
2837 	else
2838 		p1 = 1;
2839 
2840 	switch (p0) {
2841 	case DPLL_CFGCR1_PDIV_2:
2842 		p0 = 2;
2843 		break;
2844 	case DPLL_CFGCR1_PDIV_3:
2845 		p0 = 3;
2846 		break;
2847 	case DPLL_CFGCR1_PDIV_5:
2848 		p0 = 5;
2849 		break;
2850 	case DPLL_CFGCR1_PDIV_7:
2851 		p0 = 7;
2852 		break;
2853 	}
2854 
2855 	switch (p2) {
2856 	case DPLL_CFGCR1_KDIV_1:
2857 		p2 = 1;
2858 		break;
2859 	case DPLL_CFGCR1_KDIV_2:
2860 		p2 = 2;
2861 		break;
2862 	case DPLL_CFGCR1_KDIV_3:
2863 		p2 = 3;
2864 		break;
2865 	}
2866 
2867 	dco_freq = (hw_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
2868 		   ref_clock;
2869 
2870 	dco_fraction = (hw_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
2871 		       DPLL_CFGCR0_DCO_FRACTION_SHIFT;
2872 
2873 	if (ehl_combo_pll_div_frac_wa_needed(i915))
2874 		dco_fraction *= 2;
2875 
2876 	dco_freq += (dco_fraction * ref_clock) / 0x8000;
2877 
2878 	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
2879 		return 0;
2880 
2881 	return dco_freq / (p0 * p1 * p2 * 5);
2882 }
2883 
2884 static void icl_calc_dpll_state(struct drm_i915_private *i915,
2885 				const struct skl_wrpll_params *pll_params,
2886 				struct intel_dpll_hw_state *dpll_hw_state)
2887 {
2888 	struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
2889 	u32 dco_fraction = pll_params->dco_fraction;
2890 
2891 	if (ehl_combo_pll_div_frac_wa_needed(i915))
2892 		dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);
2893 
2894 	hw_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
2895 			    pll_params->dco_integer;
2896 
2897 	hw_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
2898 			    DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
2899 			    DPLL_CFGCR1_KDIV(pll_params->kdiv) |
2900 			    DPLL_CFGCR1_PDIV(pll_params->pdiv);
2901 
2902 	if (DISPLAY_VER(i915) >= 12)
2903 		hw_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
2904 	else
2905 		hw_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
2906 
2907 	if (i915->display.vbt.override_afc_startup)
2908 		hw_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->display.vbt.override_afc_startup_val);
2909 }
2910 
2911 static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
2912 				    u32 *target_dco_khz,
2913 				    struct icl_dpll_hw_state *hw_state,
2914 				    bool is_dkl)
2915 {
2916 	static const u8 div1_vals[] = { 7, 5, 3, 2 };
2917 	u32 dco_min_freq, dco_max_freq;
2918 	unsigned int i;
2919 	int div2;
2920 
2921 	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
2922 	dco_max_freq = is_dp ? 8100000 : 10000000;
2923 
2924 	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
2925 		int div1 = div1_vals[i];
2926 
2927 		for (div2 = 10; div2 > 0; div2--) {
2928 			int dco = div1 * div2 * clock_khz * 5;
2929 			int a_divratio, tlinedrv, inputsel;
2930 			u32 hsdiv;
2931 
2932 			if (dco < dco_min_freq || dco > dco_max_freq)
2933 				continue;
2934 
2935 			if (div2 >= 2) {
2936 				/*
2937 				 * Note: a_divratio not matching TGL BSpec
2938 				 * algorithm but matching hardcoded values and
2939 				 * working on HW for DP alt-mode at least
2940 				 */
2941 				a_divratio = is_dp ? 10 : 5;
2942 				tlinedrv = is_dkl ? 1 : 2;
2943 			} else {
2944 				a_divratio = 5;
2945 				tlinedrv = 0;
2946 			}
2947 			inputsel = is_dp ? 0 : 1;
2948 
2949 			switch (div1) {
2950 			default:
2951 				MISSING_CASE(div1);
2952 				fallthrough;
2953 			case 2:
2954 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
2955 				break;
2956 			case 3:
2957 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
2958 				break;
2959 			case 5:
2960 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
2961 				break;
2962 			case 7:
2963 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
2964 				break;
2965 			}
2966 
2967 			*target_dco_khz = dco;
2968 
2969 			hw_state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
2970 
2971 			hw_state->mg_clktop2_coreclkctl1 =
2972 				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
2973 
2974 			hw_state->mg_clktop2_hsclkctl =
2975 				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
2976 				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
2977 				hsdiv |
2978 				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
2979 
2980 			return 0;
2981 		}
2982 	}
2983 
2984 	return -EINVAL;
2985 }
2986 
2987 /*
2988  * The specification for this function uses real numbers, so the math had to be
2989  * adapted to integer-only calculation, that's why it looks so different.
2990  */
2991 static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
2992 				 struct intel_dpll_hw_state *dpll_hw_state)
2993 {
2994 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2995 	struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
2996 	int refclk_khz = i915->display.dpll.ref_clks.nssc;
2997 	int clock = crtc_state->port_clock;
2998 	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
2999 	u32 iref_ndiv, iref_trim, iref_pulse_w;
3000 	u32 prop_coeff, int_coeff;
3001 	u32 tdc_targetcnt, feedfwgain;
3002 	u64 ssc_stepsize, ssc_steplen, ssc_steplog;
3003 	u64 tmp;
3004 	bool use_ssc = false;
3005 	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
3006 	bool is_dkl = DISPLAY_VER(i915) >= 12;
3007 	int ret;
3008 
3009 	ret = icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
3010 				       hw_state, is_dkl);
3011 	if (ret)
3012 		return ret;
3013 
3014 	m1div = 2;
3015 	m2div_int = dco_khz / (refclk_khz * m1div);
3016 	if (m2div_int > 255) {
3017 		if (!is_dkl) {
3018 			m1div = 4;
3019 			m2div_int = dco_khz / (refclk_khz * m1div);
3020 		}
3021 
3022 		if (m2div_int > 255)
3023 			return -EINVAL;
3024 	}
3025 	m2div_rem = dco_khz % (refclk_khz * m1div);
3026 
3027 	tmp = (u64)m2div_rem * (1 << 22);
3028 	do_div(tmp, refclk_khz * m1div);
3029 	m2div_frac = tmp;
3030 
3031 	switch (refclk_khz) {
3032 	case 19200:
3033 		iref_ndiv = 1;
3034 		iref_trim = 28;
3035 		iref_pulse_w = 1;
3036 		break;
3037 	case 24000:
3038 		iref_ndiv = 1;
3039 		iref_trim = 25;
3040 		iref_pulse_w = 2;
3041 		break;
3042 	case 38400:
3043 		iref_ndiv = 2;
3044 		iref_trim = 28;
3045 		iref_pulse_w = 1;
3046 		break;
3047 	default:
3048 		MISSING_CASE(refclk_khz);
3049 		return -EINVAL;
3050 	}
3051 
3052 	/*
3053 	 * tdc_res = 0.000003
3054 	 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
3055 	 *
3056 	 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
3057 	 * was supposed to be a division, but we rearranged the operations of
3058 	 * the formula to avoid early divisions so we don't multiply the
3059 	 * rounding errors.
3060 	 *
3061 	 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
3062 	 * we also rearrange to work with integers.
3063 	 *
3064 	 * The 0.5 transformed to 5 results in a multiplication by 10 and the
3065 	 * last division by 10.
3066 	 */
3067 	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
3068 
3069 	/*
3070 	 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
3071 	 * 32 bits. That's not a problem since we round the division down
3072 	 * anyway.
3073 	 */
3074 	feedfwgain = (use_ssc || m2div_rem > 0) ?
3075 		m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
3076 
3077 	if (dco_khz >= 9000000) {
3078 		prop_coeff = 5;
3079 		int_coeff = 10;
3080 	} else {
3081 		prop_coeff = 4;
3082 		int_coeff = 8;
3083 	}
3084 
3085 	if (use_ssc) {
3086 		tmp = mul_u32_u32(dco_khz, 47 * 32);
3087 		do_div(tmp, refclk_khz * m1div * 10000);
3088 		ssc_stepsize = tmp;
3089 
3090 		tmp = mul_u32_u32(dco_khz, 1000);
3091 		ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
3092 	} else {
3093 		ssc_stepsize = 0;
3094 		ssc_steplen = 0;
3095 	}
3096 	ssc_steplog = 4;
3097 
3098 	/* write pll_state calculations */
3099 	if (is_dkl) {
3100 		hw_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
3101 					 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
3102 					 DKL_PLL_DIV0_FBPREDIV(m1div) |
3103 					 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
3104 		if (i915->display.vbt.override_afc_startup) {
3105 			u8 val = i915->display.vbt.override_afc_startup_val;
3106 
3107 			hw_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val);
3108 		}
3109 
3110 		hw_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
3111 					 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
3112 
3113 		hw_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
3114 					DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
3115 					DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
3116 					(use_ssc ? DKL_PLL_SSC_EN : 0);
3117 
3118 		hw_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
3119 					  DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
3120 
3121 		hw_state->mg_pll_tdc_coldst_bias =
3122 				DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
3123 				DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
3124 
3125 	} else {
3126 		hw_state->mg_pll_div0 =
3127 			(m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
3128 			MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
3129 			MG_PLL_DIV0_FBDIV_INT(m2div_int);
3130 
3131 		hw_state->mg_pll_div1 =
3132 			MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
3133 			MG_PLL_DIV1_DITHER_DIV_2 |
3134 			MG_PLL_DIV1_NDIVRATIO(1) |
3135 			MG_PLL_DIV1_FBPREDIV(m1div);
3136 
3137 		hw_state->mg_pll_lf =
3138 			MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
3139 			MG_PLL_LF_AFCCNTSEL_512 |
3140 			MG_PLL_LF_GAINCTRL(1) |
3141 			MG_PLL_LF_INT_COEFF(int_coeff) |
3142 			MG_PLL_LF_PROP_COEFF(prop_coeff);
3143 
3144 		hw_state->mg_pll_frac_lock =
3145 			MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
3146 			MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
3147 			MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
3148 			MG_PLL_FRAC_LOCK_DCODITHEREN |
3149 			MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
3150 		if (use_ssc || m2div_rem > 0)
3151 			hw_state->mg_pll_frac_lock |=
3152 				MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
3153 
3154 		hw_state->mg_pll_ssc =
3155 			(use_ssc ? MG_PLL_SSC_EN : 0) |
3156 			MG_PLL_SSC_TYPE(2) |
3157 			MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
3158 			MG_PLL_SSC_STEPNUM(ssc_steplog) |
3159 			MG_PLL_SSC_FLLEN |
3160 			MG_PLL_SSC_STEPSIZE(ssc_stepsize);
3161 
3162 		hw_state->mg_pll_tdc_coldst_bias =
3163 			MG_PLL_TDC_COLDST_COLDSTART |
3164 			MG_PLL_TDC_COLDST_IREFINT_EN |
3165 			MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
3166 			MG_PLL_TDC_TDCOVCCORR_EN |
3167 			MG_PLL_TDC_TDCSEL(3);
3168 
3169 		hw_state->mg_pll_bias =
3170 			MG_PLL_BIAS_BIAS_GB_SEL(3) |
3171 			MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
3172 			MG_PLL_BIAS_BIAS_BONUS(10) |
3173 			MG_PLL_BIAS_BIASCAL_EN |
3174 			MG_PLL_BIAS_CTRIM(12) |
3175 			MG_PLL_BIAS_VREF_RDAC(4) |
3176 			MG_PLL_BIAS_IREFTRIM(iref_trim);
3177 
3178 		if (refclk_khz == 38400) {
3179 			hw_state->mg_pll_tdc_coldst_bias_mask =
3180 				MG_PLL_TDC_COLDST_COLDSTART;
3181 			hw_state->mg_pll_bias_mask = 0;
3182 		} else {
3183 			hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
3184 			hw_state->mg_pll_bias_mask = -1U;
3185 		}
3186 
3187 		hw_state->mg_pll_tdc_coldst_bias &=
3188 			hw_state->mg_pll_tdc_coldst_bias_mask;
3189 		hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
3190 	}
3191 
3192 	return 0;
3193 }
3194 
3195 static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *i915,
3196 				   const struct intel_shared_dpll *pll,
3197 				   const struct intel_dpll_hw_state *dpll_hw_state)
3198 {
3199 	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3200 	u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
3201 	u64 tmp;
3202 
3203 	ref_clock = i915->display.dpll.ref_clks.nssc;
3204 
3205 	if (DISPLAY_VER(i915) >= 12) {
3206 		m1 = hw_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
3207 		m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
3208 		m2_int = hw_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
3209 
3210 		if (hw_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
3211 			m2_frac = hw_state->mg_pll_bias &
3212 				  DKL_PLL_BIAS_FBDIV_FRAC_MASK;
3213 			m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
3214 		} else {
3215 			m2_frac = 0;
3216 		}
3217 	} else {
3218 		m1 = hw_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
3219 		m2_int = hw_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
3220 
3221 		if (hw_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
3222 			m2_frac = hw_state->mg_pll_div0 &
3223 				  MG_PLL_DIV0_FBDIV_FRAC_MASK;
3224 			m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
3225 		} else {
3226 			m2_frac = 0;
3227 		}
3228 	}
3229 
3230 	switch (hw_state->mg_clktop2_hsclkctl &
3231 		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
3232 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
3233 		div1 = 2;
3234 		break;
3235 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
3236 		div1 = 3;
3237 		break;
3238 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
3239 		div1 = 5;
3240 		break;
3241 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
3242 		div1 = 7;
3243 		break;
3244 	default:
3245 		MISSING_CASE(hw_state->mg_clktop2_hsclkctl);
3246 		return 0;
3247 	}
3248 
3249 	div2 = (hw_state->mg_clktop2_hsclkctl &
3250 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
3251 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;
3252 
3253 	/* div2 value of 0 is same as 1 means no div */
3254 	if (div2 == 0)
3255 		div2 = 1;
3256 
3257 	/*
3258 	 * Adjust the original formula to delay the division by 2^22 in order to
3259 	 * minimize possible rounding errors.
3260 	 */
3261 	tmp = (u64)m1 * m2_int * ref_clock +
3262 	      (((u64)m1 * m2_frac * ref_clock) >> 22);
3263 	tmp = div_u64(tmp, 5 * div1 * div2);
3264 
3265 	return tmp;
3266 }
3267 
3268 /**
3269  * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
3270  * @crtc_state: state for the CRTC to select the DPLL for
3271  * @port_dpll_id: the active @port_dpll_id to select
3272  *
3273  * Select the given @port_dpll_id instance from the DPLLs reserved for the
3274  * CRTC.
3275  */
3276 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
3277 			      enum icl_port_dpll_id port_dpll_id)
3278 {
3279 	struct icl_port_dpll *port_dpll =
3280 		&crtc_state->icl_port_dplls[port_dpll_id];
3281 
3282 	crtc_state->shared_dpll = port_dpll->pll;
3283 	crtc_state->dpll_hw_state = port_dpll->hw_state;
3284 }
3285 
3286 static void icl_update_active_dpll(struct intel_atomic_state *state,
3287 				   struct intel_crtc *crtc,
3288 				   struct intel_encoder *encoder)
3289 {
3290 	struct intel_crtc_state *crtc_state =
3291 		intel_atomic_get_new_crtc_state(state, crtc);
3292 	struct intel_digital_port *primary_port;
3293 	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3294 
3295 	primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3296 		enc_to_mst(encoder)->primary :
3297 		enc_to_dig_port(encoder);
3298 
3299 	if (primary_port &&
3300 	    (intel_tc_port_in_dp_alt_mode(primary_port) ||
3301 	     intel_tc_port_in_legacy_mode(primary_port)))
3302 		port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3303 
3304 	icl_set_active_port_dpll(crtc_state, port_dpll_id);
3305 }
3306 
3307 static int icl_compute_combo_phy_dpll(struct intel_atomic_state *state,
3308 				      struct intel_crtc *crtc)
3309 {
3310 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
3311 	struct intel_crtc_state *crtc_state =
3312 		intel_atomic_get_new_crtc_state(state, crtc);
3313 	struct icl_port_dpll *port_dpll =
3314 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3315 	struct skl_wrpll_params pll_params = {};
3316 	int ret;
3317 
3318 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
3319 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
3320 		ret = icl_calc_wrpll(crtc_state, &pll_params);
3321 	else
3322 		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
3323 
3324 	if (ret)
3325 		return ret;
3326 
3327 	icl_calc_dpll_state(i915, &pll_params, &port_dpll->hw_state);
3328 
3329 	/* this is mainly for the fastset check */
3330 	icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
3331 
3332 	crtc_state->port_clock = icl_ddi_combo_pll_get_freq(i915, NULL,
3333 							    &port_dpll->hw_state);
3334 
3335 	return 0;
3336 }
3337 
3338 static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
3339 				  struct intel_crtc *crtc,
3340 				  struct intel_encoder *encoder)
3341 {
3342 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
3343 	struct intel_crtc_state *crtc_state =
3344 		intel_atomic_get_new_crtc_state(state, crtc);
3345 	struct icl_port_dpll *port_dpll =
3346 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3347 	enum port port = encoder->port;
3348 	unsigned long dpll_mask;
3349 
3350 	if (IS_ALDERLAKE_S(i915)) {
3351 		dpll_mask =
3352 			BIT(DPLL_ID_DG1_DPLL3) |
3353 			BIT(DPLL_ID_DG1_DPLL2) |
3354 			BIT(DPLL_ID_ICL_DPLL1) |
3355 			BIT(DPLL_ID_ICL_DPLL0);
3356 	} else if (IS_DG1(i915)) {
3357 		if (port == PORT_D || port == PORT_E) {
3358 			dpll_mask =
3359 				BIT(DPLL_ID_DG1_DPLL2) |
3360 				BIT(DPLL_ID_DG1_DPLL3);
3361 		} else {
3362 			dpll_mask =
3363 				BIT(DPLL_ID_DG1_DPLL0) |
3364 				BIT(DPLL_ID_DG1_DPLL1);
3365 		}
3366 	} else if (IS_ROCKETLAKE(i915)) {
3367 		dpll_mask =
3368 			BIT(DPLL_ID_EHL_DPLL4) |
3369 			BIT(DPLL_ID_ICL_DPLL1) |
3370 			BIT(DPLL_ID_ICL_DPLL0);
3371 	} else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
3372 		   port != PORT_A) {
3373 		dpll_mask =
3374 			BIT(DPLL_ID_EHL_DPLL4) |
3375 			BIT(DPLL_ID_ICL_DPLL1) |
3376 			BIT(DPLL_ID_ICL_DPLL0);
3377 	} else {
3378 		dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
3379 	}
3380 
3381 	/* Eliminate DPLLs from consideration if reserved by HTI */
3382 	dpll_mask &= ~intel_hti_dpll_mask(i915);
3383 
3384 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3385 						&port_dpll->hw_state,
3386 						dpll_mask);
3387 	if (!port_dpll->pll)
3388 		return -EINVAL;
3389 
3390 	intel_reference_shared_dpll(state, crtc,
3391 				    port_dpll->pll, &port_dpll->hw_state);
3392 
3393 	icl_update_active_dpll(state, crtc, encoder);
3394 
3395 	return 0;
3396 }
3397 
3398 static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
3399 				    struct intel_crtc *crtc)
3400 {
3401 	struct drm_i915_private *i915 = to_i915(state->base.dev);
3402 	struct intel_crtc_state *crtc_state =
3403 		intel_atomic_get_new_crtc_state(state, crtc);
3404 	const struct intel_crtc_state *old_crtc_state =
3405 		intel_atomic_get_old_crtc_state(state, crtc);
3406 	struct icl_port_dpll *port_dpll =
3407 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3408 	struct skl_wrpll_params pll_params = {};
3409 	int ret;
3410 
3411 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3412 	ret = icl_calc_tbt_pll(crtc_state, &pll_params);
3413 	if (ret)
3414 		return ret;
3415 
3416 	icl_calc_dpll_state(i915, &pll_params, &port_dpll->hw_state);
3417 
3418 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3419 	ret = icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state);
3420 	if (ret)
3421 		return ret;
3422 
3423 	/* this is mainly for the fastset check */
3424 	if (old_crtc_state->shared_dpll &&
3425 	    old_crtc_state->shared_dpll->info->id == DPLL_ID_ICL_TBTPLL)
3426 		icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
3427 	else
3428 		icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);
3429 
3430 	crtc_state->port_clock = icl_ddi_mg_pll_get_freq(i915, NULL,
3431 							 &port_dpll->hw_state);
3432 
3433 	return 0;
3434 }
3435 
3436 static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
3437 				struct intel_crtc *crtc,
3438 				struct intel_encoder *encoder)
3439 {
3440 	struct intel_crtc_state *crtc_state =
3441 		intel_atomic_get_new_crtc_state(state, crtc);
3442 	struct icl_port_dpll *port_dpll =
3443 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3444 	enum intel_dpll_id dpll_id;
3445 	int ret;
3446 
3447 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3448 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3449 						&port_dpll->hw_state,
3450 						BIT(DPLL_ID_ICL_TBTPLL));
3451 	if (!port_dpll->pll)
3452 		return -EINVAL;
3453 	intel_reference_shared_dpll(state, crtc,
3454 				    port_dpll->pll, &port_dpll->hw_state);
3455 
3456 
3457 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3458 	dpll_id = icl_tc_port_to_pll_id(intel_encoder_to_tc(encoder));
3459 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3460 						&port_dpll->hw_state,
3461 						BIT(dpll_id));
3462 	if (!port_dpll->pll) {
3463 		ret = -EINVAL;
3464 		goto err_unreference_tbt_pll;
3465 	}
3466 	intel_reference_shared_dpll(state, crtc,
3467 				    port_dpll->pll, &port_dpll->hw_state);
3468 
3469 	icl_update_active_dpll(state, crtc, encoder);
3470 
3471 	return 0;
3472 
3473 err_unreference_tbt_pll:
3474 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3475 	intel_unreference_shared_dpll(state, crtc, port_dpll->pll);
3476 
3477 	return ret;
3478 }
3479 
3480 static int icl_compute_dplls(struct intel_atomic_state *state,
3481 			     struct intel_crtc *crtc,
3482 			     struct intel_encoder *encoder)
3483 {
3484 	if (intel_encoder_is_combo(encoder))
3485 		return icl_compute_combo_phy_dpll(state, crtc);
3486 	else if (intel_encoder_is_tc(encoder))
3487 		return icl_compute_tc_phy_dplls(state, crtc);
3488 
3489 	MISSING_CASE(encoder->port);
3490 
3491 	return 0;
3492 }
3493 
3494 static int icl_get_dplls(struct intel_atomic_state *state,
3495 			 struct intel_crtc *crtc,
3496 			 struct intel_encoder *encoder)
3497 {
3498 	if (intel_encoder_is_combo(encoder))
3499 		return icl_get_combo_phy_dpll(state, crtc, encoder);
3500 	else if (intel_encoder_is_tc(encoder))
3501 		return icl_get_tc_phy_dplls(state, crtc, encoder);
3502 
3503 	MISSING_CASE(encoder->port);
3504 
3505 	return -EINVAL;
3506 }
3507 
3508 static void icl_put_dplls(struct intel_atomic_state *state,
3509 			  struct intel_crtc *crtc)
3510 {
3511 	const struct intel_crtc_state *old_crtc_state =
3512 		intel_atomic_get_old_crtc_state(state, crtc);
3513 	struct intel_crtc_state *new_crtc_state =
3514 		intel_atomic_get_new_crtc_state(state, crtc);
3515 	enum icl_port_dpll_id id;
3516 
3517 	new_crtc_state->shared_dpll = NULL;
3518 
3519 	for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3520 		const struct icl_port_dpll *old_port_dpll =
3521 			&old_crtc_state->icl_port_dplls[id];
3522 		struct icl_port_dpll *new_port_dpll =
3523 			&new_crtc_state->icl_port_dplls[id];
3524 
3525 		new_port_dpll->pll = NULL;
3526 
3527 		if (!old_port_dpll->pll)
3528 			continue;
3529 
3530 		intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3531 	}
3532 }
3533 
3534 static bool mg_pll_get_hw_state(struct drm_i915_private *i915,
3535 				struct intel_shared_dpll *pll,
3536 				struct intel_dpll_hw_state *dpll_hw_state)
3537 {
3538 	struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3539 	const enum intel_dpll_id id = pll->info->id;
3540 	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3541 	intel_wakeref_t wakeref;
3542 	bool ret = false;
3543 	u32 val;
3544 
3545 	i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll);
3546 
3547 	wakeref = intel_display_power_get_if_enabled(i915,
3548 						     POWER_DOMAIN_DISPLAY_CORE);
3549 	if (!wakeref)
3550 		return false;
3551 
3552 	val = intel_de_read(i915, enable_reg);
3553 	if (!(val & PLL_ENABLE))
3554 		goto out;
3555 
3556 	hw_state->mg_refclkin_ctl = intel_de_read(i915,
3557 						  MG_REFCLKIN_CTL(tc_port));
3558 	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3559 
3560 	hw_state->mg_clktop2_coreclkctl1 =
3561 		intel_de_read(i915, MG_CLKTOP2_CORECLKCTL1(tc_port));
3562 	hw_state->mg_clktop2_coreclkctl1 &=
3563 		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3564 
3565 	hw_state->mg_clktop2_hsclkctl =
3566 		intel_de_read(i915, MG_CLKTOP2_HSCLKCTL(tc_port));
3567 	hw_state->mg_clktop2_hsclkctl &=
3568 		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3569 		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3570 		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3571 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3572 
3573 	hw_state->mg_pll_div0 = intel_de_read(i915, MG_PLL_DIV0(tc_port));
3574 	hw_state->mg_pll_div1 = intel_de_read(i915, MG_PLL_DIV1(tc_port));
3575 	hw_state->mg_pll_lf = intel_de_read(i915, MG_PLL_LF(tc_port));
3576 	hw_state->mg_pll_frac_lock = intel_de_read(i915,
3577 						   MG_PLL_FRAC_LOCK(tc_port));
3578 	hw_state->mg_pll_ssc = intel_de_read(i915, MG_PLL_SSC(tc_port));
3579 
3580 	hw_state->mg_pll_bias = intel_de_read(i915, MG_PLL_BIAS(tc_port));
3581 	hw_state->mg_pll_tdc_coldst_bias =
3582 		intel_de_read(i915, MG_PLL_TDC_COLDST_BIAS(tc_port));
3583 
3584 	if (i915->display.dpll.ref_clks.nssc == 38400) {
3585 		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
3586 		hw_state->mg_pll_bias_mask = 0;
3587 	} else {
3588 		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
3589 		hw_state->mg_pll_bias_mask = -1U;
3590 	}
3591 
3592 	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
3593 	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
3594 
3595 	ret = true;
3596 out:
3597 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3598 	return ret;
3599 }
3600 
3601 static bool dkl_pll_get_hw_state(struct drm_i915_private *i915,
3602 				 struct intel_shared_dpll *pll,
3603 				 struct intel_dpll_hw_state *dpll_hw_state)
3604 {
3605 	struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3606 	const enum intel_dpll_id id = pll->info->id;
3607 	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3608 	intel_wakeref_t wakeref;
3609 	bool ret = false;
3610 	u32 val;
3611 
3612 	wakeref = intel_display_power_get_if_enabled(i915,
3613 						     POWER_DOMAIN_DISPLAY_CORE);
3614 	if (!wakeref)
3615 		return false;
3616 
3617 	val = intel_de_read(i915, intel_tc_pll_enable_reg(i915, pll));
3618 	if (!(val & PLL_ENABLE))
3619 		goto out;
3620 
3621 	/*
3622 	 * All registers read here have the same HIP_INDEX_REG even though
3623 	 * they are on different building blocks
3624 	 */
3625 	hw_state->mg_refclkin_ctl = intel_dkl_phy_read(i915,
3626 						       DKL_REFCLKIN_CTL(tc_port));
3627 	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3628 
3629 	hw_state->mg_clktop2_hsclkctl =
3630 		intel_dkl_phy_read(i915, DKL_CLKTOP2_HSCLKCTL(tc_port));
3631 	hw_state->mg_clktop2_hsclkctl &=
3632 		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3633 		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3634 		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3635 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3636 
3637 	hw_state->mg_clktop2_coreclkctl1 =
3638 		intel_dkl_phy_read(i915, DKL_CLKTOP2_CORECLKCTL1(tc_port));
3639 	hw_state->mg_clktop2_coreclkctl1 &=
3640 		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3641 
3642 	hw_state->mg_pll_div0 = intel_dkl_phy_read(i915, DKL_PLL_DIV0(tc_port));
3643 	val = DKL_PLL_DIV0_MASK;
3644 	if (i915->display.vbt.override_afc_startup)
3645 		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
3646 	hw_state->mg_pll_div0 &= val;
3647 
3648 	hw_state->mg_pll_div1 = intel_dkl_phy_read(i915, DKL_PLL_DIV1(tc_port));
3649 	hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
3650 				  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
3651 
3652 	hw_state->mg_pll_ssc = intel_dkl_phy_read(i915, DKL_PLL_SSC(tc_port));
3653 	hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
3654 				 DKL_PLL_SSC_STEP_LEN_MASK |
3655 				 DKL_PLL_SSC_STEP_NUM_MASK |
3656 				 DKL_PLL_SSC_EN);
3657 
3658 	hw_state->mg_pll_bias = intel_dkl_phy_read(i915, DKL_PLL_BIAS(tc_port));
3659 	hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
3660 				  DKL_PLL_BIAS_FBDIV_FRAC_MASK);
3661 
3662 	hw_state->mg_pll_tdc_coldst_bias =
3663 		intel_dkl_phy_read(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3664 	hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
3665 					     DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
3666 
3667 	ret = true;
3668 out:
3669 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3670 	return ret;
3671 }
3672 
3673 static bool icl_pll_get_hw_state(struct drm_i915_private *i915,
3674 				 struct intel_shared_dpll *pll,
3675 				 struct intel_dpll_hw_state *dpll_hw_state,
3676 				 i915_reg_t enable_reg)
3677 {
3678 	struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3679 	const enum intel_dpll_id id = pll->info->id;
3680 	intel_wakeref_t wakeref;
3681 	bool ret = false;
3682 	u32 val;
3683 
3684 	wakeref = intel_display_power_get_if_enabled(i915,
3685 						     POWER_DOMAIN_DISPLAY_CORE);
3686 	if (!wakeref)
3687 		return false;
3688 
3689 	val = intel_de_read(i915, enable_reg);
3690 	if (!(val & PLL_ENABLE))
3691 		goto out;
3692 
3693 	if (IS_ALDERLAKE_S(i915)) {
3694 		hw_state->cfgcr0 = intel_de_read(i915, ADLS_DPLL_CFGCR0(id));
3695 		hw_state->cfgcr1 = intel_de_read(i915, ADLS_DPLL_CFGCR1(id));
3696 	} else if (IS_DG1(i915)) {
3697 		hw_state->cfgcr0 = intel_de_read(i915, DG1_DPLL_CFGCR0(id));
3698 		hw_state->cfgcr1 = intel_de_read(i915, DG1_DPLL_CFGCR1(id));
3699 	} else if (IS_ROCKETLAKE(i915)) {
3700 		hw_state->cfgcr0 = intel_de_read(i915,
3701 						 RKL_DPLL_CFGCR0(id));
3702 		hw_state->cfgcr1 = intel_de_read(i915,
3703 						 RKL_DPLL_CFGCR1(id));
3704 	} else if (DISPLAY_VER(i915) >= 12) {
3705 		hw_state->cfgcr0 = intel_de_read(i915,
3706 						 TGL_DPLL_CFGCR0(id));
3707 		hw_state->cfgcr1 = intel_de_read(i915,
3708 						 TGL_DPLL_CFGCR1(id));
3709 		if (i915->display.vbt.override_afc_startup) {
3710 			hw_state->div0 = intel_de_read(i915, TGL_DPLL0_DIV0(id));
3711 			hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK;
3712 		}
3713 	} else {
3714 		if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
3715 		    id == DPLL_ID_EHL_DPLL4) {
3716 			hw_state->cfgcr0 = intel_de_read(i915,
3717 							 ICL_DPLL_CFGCR0(4));
3718 			hw_state->cfgcr1 = intel_de_read(i915,
3719 							 ICL_DPLL_CFGCR1(4));
3720 		} else {
3721 			hw_state->cfgcr0 = intel_de_read(i915,
3722 							 ICL_DPLL_CFGCR0(id));
3723 			hw_state->cfgcr1 = intel_de_read(i915,
3724 							 ICL_DPLL_CFGCR1(id));
3725 		}
3726 	}
3727 
3728 	ret = true;
3729 out:
3730 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3731 	return ret;
3732 }
3733 
3734 static bool combo_pll_get_hw_state(struct drm_i915_private *i915,
3735 				   struct intel_shared_dpll *pll,
3736 				   struct intel_dpll_hw_state *dpll_hw_state)
3737 {
3738 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
3739 
3740 	return icl_pll_get_hw_state(i915, pll, dpll_hw_state, enable_reg);
3741 }
3742 
3743 static bool tbt_pll_get_hw_state(struct drm_i915_private *i915,
3744 				 struct intel_shared_dpll *pll,
3745 				 struct intel_dpll_hw_state *dpll_hw_state)
3746 {
3747 	return icl_pll_get_hw_state(i915, pll, dpll_hw_state, TBT_PLL_ENABLE);
3748 }
3749 
3750 static void icl_dpll_write(struct drm_i915_private *i915,
3751 			   struct intel_shared_dpll *pll,
3752 			   const struct icl_dpll_hw_state *hw_state)
3753 {
3754 	const enum intel_dpll_id id = pll->info->id;
3755 	i915_reg_t cfgcr0_reg, cfgcr1_reg, div0_reg = INVALID_MMIO_REG;
3756 
3757 	if (IS_ALDERLAKE_S(i915)) {
3758 		cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
3759 		cfgcr1_reg = ADLS_DPLL_CFGCR1(id);
3760 	} else if (IS_DG1(i915)) {
3761 		cfgcr0_reg = DG1_DPLL_CFGCR0(id);
3762 		cfgcr1_reg = DG1_DPLL_CFGCR1(id);
3763 	} else if (IS_ROCKETLAKE(i915)) {
3764 		cfgcr0_reg = RKL_DPLL_CFGCR0(id);
3765 		cfgcr1_reg = RKL_DPLL_CFGCR1(id);
3766 	} else if (DISPLAY_VER(i915) >= 12) {
3767 		cfgcr0_reg = TGL_DPLL_CFGCR0(id);
3768 		cfgcr1_reg = TGL_DPLL_CFGCR1(id);
3769 		div0_reg = TGL_DPLL0_DIV0(id);
3770 	} else {
3771 		if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
3772 		    id == DPLL_ID_EHL_DPLL4) {
3773 			cfgcr0_reg = ICL_DPLL_CFGCR0(4);
3774 			cfgcr1_reg = ICL_DPLL_CFGCR1(4);
3775 		} else {
3776 			cfgcr0_reg = ICL_DPLL_CFGCR0(id);
3777 			cfgcr1_reg = ICL_DPLL_CFGCR1(id);
3778 		}
3779 	}
3780 
3781 	intel_de_write(i915, cfgcr0_reg, hw_state->cfgcr0);
3782 	intel_de_write(i915, cfgcr1_reg, hw_state->cfgcr1);
3783 	drm_WARN_ON_ONCE(&i915->drm, i915->display.vbt.override_afc_startup &&
3784 			 !i915_mmio_reg_valid(div0_reg));
3785 	if (i915->display.vbt.override_afc_startup &&
3786 	    i915_mmio_reg_valid(div0_reg))
3787 		intel_de_rmw(i915, div0_reg,
3788 			     TGL_DPLL0_DIV0_AFC_STARTUP_MASK, hw_state->div0);
3789 	intel_de_posting_read(i915, cfgcr1_reg);
3790 }
3791 
3792 static void icl_mg_pll_write(struct drm_i915_private *i915,
3793 			     struct intel_shared_dpll *pll,
3794 			     const struct icl_dpll_hw_state *hw_state)
3795 {
3796 	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3797 
3798 	/*
3799 	 * Some of the following registers have reserved fields, so program
3800 	 * these with RMW based on a mask. The mask can be fixed or generated
3801 	 * during the calc/readout phase if the mask depends on some other HW
3802 	 * state like refclk, see icl_calc_mg_pll_state().
3803 	 */
3804 	intel_de_rmw(i915, MG_REFCLKIN_CTL(tc_port),
3805 		     MG_REFCLKIN_CTL_OD_2_MUX_MASK, hw_state->mg_refclkin_ctl);
3806 
3807 	intel_de_rmw(i915, MG_CLKTOP2_CORECLKCTL1(tc_port),
3808 		     MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK,
3809 		     hw_state->mg_clktop2_coreclkctl1);
3810 
3811 	intel_de_rmw(i915, MG_CLKTOP2_HSCLKCTL(tc_port),
3812 		     MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3813 		     MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3814 		     MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3815 		     MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK,
3816 		     hw_state->mg_clktop2_hsclkctl);
3817 
3818 	intel_de_write(i915, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
3819 	intel_de_write(i915, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
3820 	intel_de_write(i915, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
3821 	intel_de_write(i915, MG_PLL_FRAC_LOCK(tc_port),
3822 		       hw_state->mg_pll_frac_lock);
3823 	intel_de_write(i915, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
3824 
3825 	intel_de_rmw(i915, MG_PLL_BIAS(tc_port),
3826 		     hw_state->mg_pll_bias_mask, hw_state->mg_pll_bias);
3827 
3828 	intel_de_rmw(i915, MG_PLL_TDC_COLDST_BIAS(tc_port),
3829 		     hw_state->mg_pll_tdc_coldst_bias_mask,
3830 		     hw_state->mg_pll_tdc_coldst_bias);
3831 
3832 	intel_de_posting_read(i915, MG_PLL_TDC_COLDST_BIAS(tc_port));
3833 }
3834 
3835 static void dkl_pll_write(struct drm_i915_private *i915,
3836 			  struct intel_shared_dpll *pll,
3837 			  const struct icl_dpll_hw_state *hw_state)
3838 {
3839 	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3840 	u32 val;
3841 
3842 	/*
3843 	 * All registers programmed here have the same HIP_INDEX_REG even
3844 	 * though on different building block
3845 	 */
3846 	/* All the registers are RMW */
3847 	val = intel_dkl_phy_read(i915, DKL_REFCLKIN_CTL(tc_port));
3848 	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3849 	val |= hw_state->mg_refclkin_ctl;
3850 	intel_dkl_phy_write(i915, DKL_REFCLKIN_CTL(tc_port), val);
3851 
3852 	val = intel_dkl_phy_read(i915, DKL_CLKTOP2_CORECLKCTL1(tc_port));
3853 	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3854 	val |= hw_state->mg_clktop2_coreclkctl1;
3855 	intel_dkl_phy_write(i915, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);
3856 
3857 	val = intel_dkl_phy_read(i915, DKL_CLKTOP2_HSCLKCTL(tc_port));
3858 	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3859 		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3860 		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3861 		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
3862 	val |= hw_state->mg_clktop2_hsclkctl;
3863 	intel_dkl_phy_write(i915, DKL_CLKTOP2_HSCLKCTL(tc_port), val);
3864 
3865 	val = DKL_PLL_DIV0_MASK;
3866 	if (i915->display.vbt.override_afc_startup)
3867 		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
3868 	intel_dkl_phy_rmw(i915, DKL_PLL_DIV0(tc_port), val,
3869 			  hw_state->mg_pll_div0);
3870 
3871 	val = intel_dkl_phy_read(i915, DKL_PLL_DIV1(tc_port));
3872 	val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
3873 		 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
3874 	val |= hw_state->mg_pll_div1;
3875 	intel_dkl_phy_write(i915, DKL_PLL_DIV1(tc_port), val);
3876 
3877 	val = intel_dkl_phy_read(i915, DKL_PLL_SSC(tc_port));
3878 	val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
3879 		 DKL_PLL_SSC_STEP_LEN_MASK |
3880 		 DKL_PLL_SSC_STEP_NUM_MASK |
3881 		 DKL_PLL_SSC_EN);
3882 	val |= hw_state->mg_pll_ssc;
3883 	intel_dkl_phy_write(i915, DKL_PLL_SSC(tc_port), val);
3884 
3885 	val = intel_dkl_phy_read(i915, DKL_PLL_BIAS(tc_port));
3886 	val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
3887 		 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
3888 	val |= hw_state->mg_pll_bias;
3889 	intel_dkl_phy_write(i915, DKL_PLL_BIAS(tc_port), val);
3890 
3891 	val = intel_dkl_phy_read(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3892 	val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
3893 		 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
3894 	val |= hw_state->mg_pll_tdc_coldst_bias;
3895 	intel_dkl_phy_write(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);
3896 
3897 	intel_dkl_phy_posting_read(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3898 }
3899 
3900 static void icl_pll_power_enable(struct drm_i915_private *i915,
3901 				 struct intel_shared_dpll *pll,
3902 				 i915_reg_t enable_reg)
3903 {
3904 	intel_de_rmw(i915, enable_reg, 0, PLL_POWER_ENABLE);
3905 
3906 	/*
3907 	 * The spec says we need to "wait" but it also says it should be
3908 	 * immediate.
3909 	 */
3910 	if (intel_de_wait_for_set(i915, enable_reg, PLL_POWER_STATE, 1))
3911 		drm_err(&i915->drm, "PLL %d Power not enabled\n",
3912 			pll->info->id);
3913 }
3914 
3915 static void icl_pll_enable(struct drm_i915_private *i915,
3916 			   struct intel_shared_dpll *pll,
3917 			   i915_reg_t enable_reg)
3918 {
3919 	intel_de_rmw(i915, enable_reg, 0, PLL_ENABLE);
3920 
3921 	/* Timeout is actually 600us. */
3922 	if (intel_de_wait_for_set(i915, enable_reg, PLL_LOCK, 1))
3923 		drm_err(&i915->drm, "PLL %d not locked\n", pll->info->id);
3924 }
3925 
3926 static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct intel_shared_dpll *pll)
3927 {
3928 	u32 val;
3929 
3930 	if (!(IS_ALDERLAKE_P(i915) && IS_DISPLAY_STEP(i915, STEP_A0, STEP_B0)) ||
3931 	    pll->info->id != DPLL_ID_ICL_DPLL0)
3932 		return;
3933 	/*
3934 	 * Wa_16011069516:adl-p[a0]
3935 	 *
3936 	 * All CMTG regs are unreliable until CMTG clock gating is disabled,
3937 	 * so we can only assume the default TRANS_CMTG_CHICKEN reg value and
3938 	 * sanity check this assumption with a double read, which presumably
3939 	 * returns the correct value even with clock gating on.
3940 	 *
3941 	 * Instead of the usual place for workarounds we apply this one here,
3942 	 * since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled.
3943 	 */
3944 	val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
3945 	val = intel_de_rmw(i915, TRANS_CMTG_CHICKEN, ~0, DISABLE_DPT_CLK_GATING);
3946 	if (drm_WARN_ON(&i915->drm, val & ~DISABLE_DPT_CLK_GATING))
3947 		drm_dbg_kms(&i915->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val);
3948 }
3949 
3950 static void combo_pll_enable(struct drm_i915_private *i915,
3951 			     struct intel_shared_dpll *pll,
3952 			     const struct intel_dpll_hw_state *dpll_hw_state)
3953 {
3954 	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3955 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
3956 
3957 	icl_pll_power_enable(i915, pll, enable_reg);
3958 
3959 	icl_dpll_write(i915, pll, hw_state);
3960 
3961 	/*
3962 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3963 	 * paths should already be setting the appropriate voltage, hence we do
3964 	 * nothing here.
3965 	 */
3966 
3967 	icl_pll_enable(i915, pll, enable_reg);
3968 
3969 	adlp_cmtg_clock_gating_wa(i915, pll);
3970 
3971 	/* DVFS post sequence would be here. See the comment above. */
3972 }
3973 
3974 static void tbt_pll_enable(struct drm_i915_private *i915,
3975 			   struct intel_shared_dpll *pll,
3976 			   const struct intel_dpll_hw_state *dpll_hw_state)
3977 {
3978 	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3979 
3980 	icl_pll_power_enable(i915, pll, TBT_PLL_ENABLE);
3981 
3982 	icl_dpll_write(i915, pll, hw_state);
3983 
3984 	/*
3985 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3986 	 * paths should already be setting the appropriate voltage, hence we do
3987 	 * nothing here.
3988 	 */
3989 
3990 	icl_pll_enable(i915, pll, TBT_PLL_ENABLE);
3991 
3992 	/* DVFS post sequence would be here. See the comment above. */
3993 }
3994 
3995 static void mg_pll_enable(struct drm_i915_private *i915,
3996 			  struct intel_shared_dpll *pll,
3997 			  const struct intel_dpll_hw_state *dpll_hw_state)
3998 {
3999 	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
4000 	i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll);
4001 
4002 	icl_pll_power_enable(i915, pll, enable_reg);
4003 
4004 	if (DISPLAY_VER(i915) >= 12)
4005 		dkl_pll_write(i915, pll, hw_state);
4006 	else
4007 		icl_mg_pll_write(i915, pll, hw_state);
4008 
4009 	/*
4010 	 * DVFS pre sequence would be here, but in our driver the cdclk code
4011 	 * paths should already be setting the appropriate voltage, hence we do
4012 	 * nothing here.
4013 	 */
4014 
4015 	icl_pll_enable(i915, pll, enable_reg);
4016 
4017 	/* DVFS post sequence would be here. See the comment above. */
4018 }
4019 
4020 static void icl_pll_disable(struct drm_i915_private *i915,
4021 			    struct intel_shared_dpll *pll,
4022 			    i915_reg_t enable_reg)
4023 {
4024 	/* The first steps are done by intel_ddi_post_disable(). */
4025 
4026 	/*
4027 	 * DVFS pre sequence would be here, but in our driver the cdclk code
4028 	 * paths should already be setting the appropriate voltage, hence we do
4029 	 * nothing here.
4030 	 */
4031 
4032 	intel_de_rmw(i915, enable_reg, PLL_ENABLE, 0);
4033 
4034 	/* Timeout is actually 1us. */
4035 	if (intel_de_wait_for_clear(i915, enable_reg, PLL_LOCK, 1))
4036 		drm_err(&i915->drm, "PLL %d locked\n", pll->info->id);
4037 
4038 	/* DVFS post sequence would be here. See the comment above. */
4039 
4040 	intel_de_rmw(i915, enable_reg, PLL_POWER_ENABLE, 0);
4041 
4042 	/*
4043 	 * The spec says we need to "wait" but it also says it should be
4044 	 * immediate.
4045 	 */
4046 	if (intel_de_wait_for_clear(i915, enable_reg, PLL_POWER_STATE, 1))
4047 		drm_err(&i915->drm, "PLL %d Power not disabled\n",
4048 			pll->info->id);
4049 }
4050 
4051 static void combo_pll_disable(struct drm_i915_private *i915,
4052 			      struct intel_shared_dpll *pll)
4053 {
4054 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
4055 
4056 	icl_pll_disable(i915, pll, enable_reg);
4057 }
4058 
4059 static void tbt_pll_disable(struct drm_i915_private *i915,
4060 			    struct intel_shared_dpll *pll)
4061 {
4062 	icl_pll_disable(i915, pll, TBT_PLL_ENABLE);
4063 }
4064 
4065 static void mg_pll_disable(struct drm_i915_private *i915,
4066 			   struct intel_shared_dpll *pll)
4067 {
4068 	i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll);
4069 
4070 	icl_pll_disable(i915, pll, enable_reg);
4071 }
4072 
4073 static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
4074 {
4075 	/* No SSC ref */
4076 	i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
4077 }
4078 
4079 static void icl_dump_hw_state(struct drm_printer *p,
4080 			      const struct intel_dpll_hw_state *dpll_hw_state)
4081 {
4082 	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
4083 
4084 	drm_printf(p, "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, "
4085 		   "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
4086 		   "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
4087 		   "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
4088 		   "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
4089 		   "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
4090 		   hw_state->cfgcr0, hw_state->cfgcr1, hw_state->div0,
4091 		   hw_state->mg_refclkin_ctl,
4092 		   hw_state->mg_clktop2_coreclkctl1,
4093 		   hw_state->mg_clktop2_hsclkctl,
4094 		   hw_state->mg_pll_div0,
4095 		   hw_state->mg_pll_div1,
4096 		   hw_state->mg_pll_lf,
4097 		   hw_state->mg_pll_frac_lock,
4098 		   hw_state->mg_pll_ssc,
4099 		   hw_state->mg_pll_bias,
4100 		   hw_state->mg_pll_tdc_coldst_bias);
4101 }
4102 
4103 static bool icl_compare_hw_state(const struct intel_dpll_hw_state *_a,
4104 				 const struct intel_dpll_hw_state *_b)
4105 {
4106 	const struct icl_dpll_hw_state *a = &_a->icl;
4107 	const struct icl_dpll_hw_state *b = &_b->icl;
4108 
4109 	/* FIXME split combo vs. mg more thoroughly */
4110 	return a->cfgcr0 == b->cfgcr0 &&
4111 		a->cfgcr1 == b->cfgcr1 &&
4112 		a->div0 == b->div0 &&
4113 		a->mg_refclkin_ctl == b->mg_refclkin_ctl &&
4114 		a->mg_clktop2_coreclkctl1 == b->mg_clktop2_coreclkctl1 &&
4115 		a->mg_clktop2_hsclkctl == b->mg_clktop2_hsclkctl &&
4116 		a->mg_pll_div0 == b->mg_pll_div0 &&
4117 		a->mg_pll_div1 == b->mg_pll_div1 &&
4118 		a->mg_pll_lf == b->mg_pll_lf &&
4119 		a->mg_pll_frac_lock == b->mg_pll_frac_lock &&
4120 		a->mg_pll_ssc == b->mg_pll_ssc &&
4121 		a->mg_pll_bias == b->mg_pll_bias &&
4122 		a->mg_pll_tdc_coldst_bias == b->mg_pll_tdc_coldst_bias;
4123 }
4124 
4125 static const struct intel_shared_dpll_funcs combo_pll_funcs = {
4126 	.enable = combo_pll_enable,
4127 	.disable = combo_pll_disable,
4128 	.get_hw_state = combo_pll_get_hw_state,
4129 	.get_freq = icl_ddi_combo_pll_get_freq,
4130 };
4131 
4132 static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
4133 	.enable = tbt_pll_enable,
4134 	.disable = tbt_pll_disable,
4135 	.get_hw_state = tbt_pll_get_hw_state,
4136 	.get_freq = icl_ddi_tbt_pll_get_freq,
4137 };
4138 
4139 static const struct intel_shared_dpll_funcs mg_pll_funcs = {
4140 	.enable = mg_pll_enable,
4141 	.disable = mg_pll_disable,
4142 	.get_hw_state = mg_pll_get_hw_state,
4143 	.get_freq = icl_ddi_mg_pll_get_freq,
4144 };
4145 
4146 static const struct dpll_info icl_plls[] = {
4147 	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4148 	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4149 	{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
4150 	  .is_alt_port_dpll = true, },
4151 	{ .name = "MG PLL 1", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
4152 	{ .name = "MG PLL 2", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
4153 	{ .name = "MG PLL 3", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
4154 	{ .name = "MG PLL 4", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
4155 	{}
4156 };
4157 
4158 static const struct intel_dpll_mgr icl_pll_mgr = {
4159 	.dpll_info = icl_plls,
4160 	.compute_dplls = icl_compute_dplls,
4161 	.get_dplls = icl_get_dplls,
4162 	.put_dplls = icl_put_dplls,
4163 	.update_active_dpll = icl_update_active_dpll,
4164 	.update_ref_clks = icl_update_dpll_ref_clks,
4165 	.dump_hw_state = icl_dump_hw_state,
4166 	.compare_hw_state = icl_compare_hw_state,
4167 };
4168 
4169 static const struct dpll_info ehl_plls[] = {
4170 	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4171 	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4172 	{ .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4,
4173 	  .power_domain = POWER_DOMAIN_DC_OFF, },
4174 	{}
4175 };
4176 
4177 static const struct intel_dpll_mgr ehl_pll_mgr = {
4178 	.dpll_info = ehl_plls,
4179 	.compute_dplls = icl_compute_dplls,
4180 	.get_dplls = icl_get_dplls,
4181 	.put_dplls = icl_put_dplls,
4182 	.update_ref_clks = icl_update_dpll_ref_clks,
4183 	.dump_hw_state = icl_dump_hw_state,
4184 	.compare_hw_state = icl_compare_hw_state,
4185 };
4186 
4187 static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
4188 	.enable = mg_pll_enable,
4189 	.disable = mg_pll_disable,
4190 	.get_hw_state = dkl_pll_get_hw_state,
4191 	.get_freq = icl_ddi_mg_pll_get_freq,
4192 };
4193 
4194 static const struct dpll_info tgl_plls[] = {
4195 	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4196 	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4197 	{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
4198 	  .is_alt_port_dpll = true, },
4199 	{ .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
4200 	{ .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
4201 	{ .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
4202 	{ .name = "TC PLL 4", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
4203 	{ .name = "TC PLL 5", .funcs = &dkl_pll_funcs, .id = DPLL_ID_TGL_MGPLL5, },
4204 	{ .name = "TC PLL 6", .funcs = &dkl_pll_funcs, .id = DPLL_ID_TGL_MGPLL6, },
4205 	{}
4206 };
4207 
4208 static const struct intel_dpll_mgr tgl_pll_mgr = {
4209 	.dpll_info = tgl_plls,
4210 	.compute_dplls = icl_compute_dplls,
4211 	.get_dplls = icl_get_dplls,
4212 	.put_dplls = icl_put_dplls,
4213 	.update_active_dpll = icl_update_active_dpll,
4214 	.update_ref_clks = icl_update_dpll_ref_clks,
4215 	.dump_hw_state = icl_dump_hw_state,
4216 	.compare_hw_state = icl_compare_hw_state,
4217 };
4218 
4219 static const struct dpll_info rkl_plls[] = {
4220 	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4221 	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4222 	{ .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4, },
4223 	{}
4224 };
4225 
4226 static const struct intel_dpll_mgr rkl_pll_mgr = {
4227 	.dpll_info = rkl_plls,
4228 	.compute_dplls = icl_compute_dplls,
4229 	.get_dplls = icl_get_dplls,
4230 	.put_dplls = icl_put_dplls,
4231 	.update_ref_clks = icl_update_dpll_ref_clks,
4232 	.dump_hw_state = icl_dump_hw_state,
4233 	.compare_hw_state = icl_compare_hw_state,
4234 };
4235 
4236 static const struct dpll_info dg1_plls[] = {
4237 	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL0, },
4238 	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL1, },
4239 	{ .name = "DPLL 2", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL2, },
4240 	{ .name = "DPLL 3", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL3, },
4241 	{}
4242 };
4243 
4244 static const struct intel_dpll_mgr dg1_pll_mgr = {
4245 	.dpll_info = dg1_plls,
4246 	.compute_dplls = icl_compute_dplls,
4247 	.get_dplls = icl_get_dplls,
4248 	.put_dplls = icl_put_dplls,
4249 	.update_ref_clks = icl_update_dpll_ref_clks,
4250 	.dump_hw_state = icl_dump_hw_state,
4251 	.compare_hw_state = icl_compare_hw_state,
4252 };
4253 
4254 static const struct dpll_info adls_plls[] = {
4255 	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4256 	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4257 	{ .name = "DPLL 2", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL2, },
4258 	{ .name = "DPLL 3", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL3, },
4259 	{}
4260 };
4261 
4262 static const struct intel_dpll_mgr adls_pll_mgr = {
4263 	.dpll_info = adls_plls,
4264 	.compute_dplls = icl_compute_dplls,
4265 	.get_dplls = icl_get_dplls,
4266 	.put_dplls = icl_put_dplls,
4267 	.update_ref_clks = icl_update_dpll_ref_clks,
4268 	.dump_hw_state = icl_dump_hw_state,
4269 	.compare_hw_state = icl_compare_hw_state,
4270 };
4271 
4272 static const struct dpll_info adlp_plls[] = {
4273 	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4274 	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4275 	{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
4276 	  .is_alt_port_dpll = true, },
4277 	{ .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
4278 	{ .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
4279 	{ .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
4280 	{ .name = "TC PLL 4", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
4281 	{}
4282 };
4283 
4284 static const struct intel_dpll_mgr adlp_pll_mgr = {
4285 	.dpll_info = adlp_plls,
4286 	.compute_dplls = icl_compute_dplls,
4287 	.get_dplls = icl_get_dplls,
4288 	.put_dplls = icl_put_dplls,
4289 	.update_active_dpll = icl_update_active_dpll,
4290 	.update_ref_clks = icl_update_dpll_ref_clks,
4291 	.dump_hw_state = icl_dump_hw_state,
4292 	.compare_hw_state = icl_compare_hw_state,
4293 };
4294 
4295 /**
4296  * intel_shared_dpll_init - Initialize shared DPLLs
4297  * @i915: i915 device
4298  *
4299  * Initialize shared DPLLs for @i915.
4300  */
4301 void intel_shared_dpll_init(struct drm_i915_private *i915)
4302 {
4303 	const struct intel_dpll_mgr *dpll_mgr = NULL;
4304 	const struct dpll_info *dpll_info;
4305 	int i;
4306 
4307 	mutex_init(&i915->display.dpll.lock);
4308 
4309 	if (DISPLAY_VER(i915) >= 14 || IS_DG2(i915))
4310 		/* No shared DPLLs on DG2; port PLLs are part of the PHY */
4311 		dpll_mgr = NULL;
4312 	else if (IS_ALDERLAKE_P(i915))
4313 		dpll_mgr = &adlp_pll_mgr;
4314 	else if (IS_ALDERLAKE_S(i915))
4315 		dpll_mgr = &adls_pll_mgr;
4316 	else if (IS_DG1(i915))
4317 		dpll_mgr = &dg1_pll_mgr;
4318 	else if (IS_ROCKETLAKE(i915))
4319 		dpll_mgr = &rkl_pll_mgr;
4320 	else if (DISPLAY_VER(i915) >= 12)
4321 		dpll_mgr = &tgl_pll_mgr;
4322 	else if (IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915))
4323 		dpll_mgr = &ehl_pll_mgr;
4324 	else if (DISPLAY_VER(i915) >= 11)
4325 		dpll_mgr = &icl_pll_mgr;
4326 	else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
4327 		dpll_mgr = &bxt_pll_mgr;
4328 	else if (DISPLAY_VER(i915) == 9)
4329 		dpll_mgr = &skl_pll_mgr;
4330 	else if (HAS_DDI(i915))
4331 		dpll_mgr = &hsw_pll_mgr;
4332 	else if (HAS_PCH_IBX(i915) || HAS_PCH_CPT(i915))
4333 		dpll_mgr = &pch_pll_mgr;
4334 
4335 	if (!dpll_mgr)
4336 		return;
4337 
4338 	dpll_info = dpll_mgr->dpll_info;
4339 
4340 	for (i = 0; dpll_info[i].name; i++) {
4341 		if (drm_WARN_ON(&i915->drm,
4342 				i >= ARRAY_SIZE(i915->display.dpll.shared_dplls)))
4343 			break;
4344 
4345 		/* must fit into unsigned long bitmask on 32bit */
4346 		if (drm_WARN_ON(&i915->drm, dpll_info[i].id >= 32))
4347 			break;
4348 
4349 		i915->display.dpll.shared_dplls[i].info = &dpll_info[i];
4350 		i915->display.dpll.shared_dplls[i].index = i;
4351 	}
4352 
4353 	i915->display.dpll.mgr = dpll_mgr;
4354 	i915->display.dpll.num_shared_dpll = i;
4355 }
4356 
4357 /**
4358  * intel_compute_shared_dplls - compute DPLL state CRTC and encoder combination
4359  * @state: atomic state
4360  * @crtc: CRTC to compute DPLLs for
4361  * @encoder: encoder
4362  *
4363  * This function computes the DPLL state for the given CRTC and encoder.
4364  *
4365  * The new configuration in the atomic commit @state is made effective by
4366  * calling intel_shared_dpll_swap_state().
4367  *
4368  * Returns:
4369  * 0 on success, negative error code on falure.
4370  */
4371 int intel_compute_shared_dplls(struct intel_atomic_state *state,
4372 			       struct intel_crtc *crtc,
4373 			       struct intel_encoder *encoder)
4374 {
4375 	struct drm_i915_private *i915 = to_i915(state->base.dev);
4376 	const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4377 
4378 	if (drm_WARN_ON(&i915->drm, !dpll_mgr))
4379 		return -EINVAL;
4380 
4381 	return dpll_mgr->compute_dplls(state, crtc, encoder);
4382 }
4383 
4384 /**
4385  * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
4386  * @state: atomic state
4387  * @crtc: CRTC to reserve DPLLs for
4388  * @encoder: encoder
4389  *
4390  * This function reserves all required DPLLs for the given CRTC and encoder
4391  * combination in the current atomic commit @state and the new @crtc atomic
4392  * state.
4393  *
4394  * The new configuration in the atomic commit @state is made effective by
4395  * calling intel_shared_dpll_swap_state().
4396  *
4397  * The reserved DPLLs should be released by calling
4398  * intel_release_shared_dplls().
4399  *
4400  * Returns:
4401  * 0 if all required DPLLs were successfully reserved,
4402  * negative error code otherwise.
4403  */
4404 int intel_reserve_shared_dplls(struct intel_atomic_state *state,
4405 			       struct intel_crtc *crtc,
4406 			       struct intel_encoder *encoder)
4407 {
4408 	struct drm_i915_private *i915 = to_i915(state->base.dev);
4409 	const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4410 
4411 	if (drm_WARN_ON(&i915->drm, !dpll_mgr))
4412 		return -EINVAL;
4413 
4414 	return dpll_mgr->get_dplls(state, crtc, encoder);
4415 }
4416 
4417 /**
4418  * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
4419  * @state: atomic state
4420  * @crtc: crtc from which the DPLLs are to be released
4421  *
4422  * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
4423  * from the current atomic commit @state and the old @crtc atomic state.
4424  *
4425  * The new configuration in the atomic commit @state is made effective by
4426  * calling intel_shared_dpll_swap_state().
4427  */
4428 void intel_release_shared_dplls(struct intel_atomic_state *state,
4429 				struct intel_crtc *crtc)
4430 {
4431 	struct drm_i915_private *i915 = to_i915(state->base.dev);
4432 	const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4433 
4434 	/*
4435 	 * FIXME: this function is called for every platform having a
4436 	 * compute_clock hook, even though the platform doesn't yet support
4437 	 * the shared DPLL framework and intel_reserve_shared_dplls() is not
4438 	 * called on those.
4439 	 */
4440 	if (!dpll_mgr)
4441 		return;
4442 
4443 	dpll_mgr->put_dplls(state, crtc);
4444 }
4445 
4446 /**
4447  * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
4448  * @state: atomic state
4449  * @crtc: the CRTC for which to update the active DPLL
4450  * @encoder: encoder determining the type of port DPLL
4451  *
4452  * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
4453  * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
4454  * DPLL selected will be based on the current mode of the encoder's port.
4455  */
4456 void intel_update_active_dpll(struct intel_atomic_state *state,
4457 			      struct intel_crtc *crtc,
4458 			      struct intel_encoder *encoder)
4459 {
4460 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
4461 	const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4462 
4463 	if (drm_WARN_ON(&i915->drm, !dpll_mgr))
4464 		return;
4465 
4466 	dpll_mgr->update_active_dpll(state, crtc, encoder);
4467 }
4468 
4469 /**
4470  * intel_dpll_get_freq - calculate the DPLL's output frequency
4471  * @i915: i915 device
4472  * @pll: DPLL for which to calculate the output frequency
4473  * @dpll_hw_state: DPLL state from which to calculate the output frequency
4474  *
4475  * Return the output frequency corresponding to @pll's passed in @dpll_hw_state.
4476  */
4477 int intel_dpll_get_freq(struct drm_i915_private *i915,
4478 			const struct intel_shared_dpll *pll,
4479 			const struct intel_dpll_hw_state *dpll_hw_state)
4480 {
4481 	if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
4482 		return 0;
4483 
4484 	return pll->info->funcs->get_freq(i915, pll, dpll_hw_state);
4485 }
4486 
4487 /**
4488  * intel_dpll_get_hw_state - readout the DPLL's hardware state
4489  * @i915: i915 device
4490  * @pll: DPLL for which to calculate the output frequency
4491  * @dpll_hw_state: DPLL's hardware state
4492  *
4493  * Read out @pll's hardware state into @dpll_hw_state.
4494  */
4495 bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
4496 			     struct intel_shared_dpll *pll,
4497 			     struct intel_dpll_hw_state *dpll_hw_state)
4498 {
4499 	return pll->info->funcs->get_hw_state(i915, pll, dpll_hw_state);
4500 }
4501 
4502 static void readout_dpll_hw_state(struct drm_i915_private *i915,
4503 				  struct intel_shared_dpll *pll)
4504 {
4505 	struct intel_crtc *crtc;
4506 
4507 	pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state);
4508 
4509 	if (pll->on && pll->info->power_domain)
4510 		pll->wakeref = intel_display_power_get(i915, pll->info->power_domain);
4511 
4512 	pll->state.pipe_mask = 0;
4513 	for_each_intel_crtc(&i915->drm, crtc) {
4514 		struct intel_crtc_state *crtc_state =
4515 			to_intel_crtc_state(crtc->base.state);
4516 
4517 		if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
4518 			intel_reference_shared_dpll_crtc(crtc, pll, &pll->state);
4519 	}
4520 	pll->active_mask = pll->state.pipe_mask;
4521 
4522 	drm_dbg_kms(&i915->drm,
4523 		    "%s hw state readout: pipe_mask 0x%x, on %i\n",
4524 		    pll->info->name, pll->state.pipe_mask, pll->on);
4525 }
4526 
4527 void intel_dpll_update_ref_clks(struct drm_i915_private *i915)
4528 {
4529 	if (i915->display.dpll.mgr && i915->display.dpll.mgr->update_ref_clks)
4530 		i915->display.dpll.mgr->update_ref_clks(i915);
4531 }
4532 
4533 void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
4534 {
4535 	struct intel_shared_dpll *pll;
4536 	int i;
4537 
4538 	for_each_shared_dpll(i915, pll, i)
4539 		readout_dpll_hw_state(i915, pll);
4540 }
4541 
4542 static void sanitize_dpll_state(struct drm_i915_private *i915,
4543 				struct intel_shared_dpll *pll)
4544 {
4545 	if (!pll->on)
4546 		return;
4547 
4548 	adlp_cmtg_clock_gating_wa(i915, pll);
4549 
4550 	if (pll->active_mask)
4551 		return;
4552 
4553 	drm_dbg_kms(&i915->drm,
4554 		    "%s enabled but not in use, disabling\n",
4555 		    pll->info->name);
4556 
4557 	_intel_disable_shared_dpll(i915, pll);
4558 }
4559 
4560 void intel_dpll_sanitize_state(struct drm_i915_private *i915)
4561 {
4562 	struct intel_shared_dpll *pll;
4563 	int i;
4564 
4565 	for_each_shared_dpll(i915, pll, i)
4566 		sanitize_dpll_state(i915, pll);
4567 }
4568 
4569 /**
4570  * intel_dpll_dump_hw_state - dump hw_state
4571  * @i915: i915 drm device
4572  * @p: where to print the state to
4573  * @dpll_hw_state: hw state to be dumped
4574  *
4575  * Dumo out the relevant values in @dpll_hw_state.
4576  */
4577 void intel_dpll_dump_hw_state(struct drm_i915_private *i915,
4578 			      struct drm_printer *p,
4579 			      const struct intel_dpll_hw_state *dpll_hw_state)
4580 {
4581 	if (i915->display.dpll.mgr) {
4582 		i915->display.dpll.mgr->dump_hw_state(p, dpll_hw_state);
4583 	} else {
4584 		/* fallback for platforms that don't use the shared dpll
4585 		 * infrastructure
4586 		 */
4587 		ibx_dump_hw_state(p, dpll_hw_state);
4588 	}
4589 }
4590 
4591 /**
4592  * intel_dpll_compare_hw_state - compare the two states
4593  * @i915: i915 drm device
4594  * @a: first DPLL hw state
4595  * @b: second DPLL hw state
4596  *
4597  * Compare DPLL hw states @a and @b.
4598  *
4599  * Returns: true if the states are equal, false if the differ
4600  */
4601 bool intel_dpll_compare_hw_state(struct drm_i915_private *i915,
4602 				 const struct intel_dpll_hw_state *a,
4603 				 const struct intel_dpll_hw_state *b)
4604 {
4605 	if (i915->display.dpll.mgr) {
4606 		return i915->display.dpll.mgr->compare_hw_state(a, b);
4607 	} else {
4608 		/* fallback for platforms that don't use the shared dpll
4609 		 * infrastructure
4610 		 */
4611 		return ibx_compare_hw_state(a, b);
4612 	}
4613 }
4614 
4615 static void
4616 verify_single_dpll_state(struct drm_i915_private *i915,
4617 			 struct intel_shared_dpll *pll,
4618 			 struct intel_crtc *crtc,
4619 			 const struct intel_crtc_state *new_crtc_state)
4620 {
4621 	struct intel_dpll_hw_state dpll_hw_state = {};
4622 	u8 pipe_mask;
4623 	bool active;
4624 
4625 	active = intel_dpll_get_hw_state(i915, pll, &dpll_hw_state);
4626 
4627 	if (!pll->info->always_on) {
4628 		I915_STATE_WARN(i915, !pll->on && pll->active_mask,
4629 				"%s: pll in active use but not on in sw tracking\n",
4630 				pll->info->name);
4631 		I915_STATE_WARN(i915, pll->on && !pll->active_mask,
4632 				"%s: pll is on but not used by any active pipe\n",
4633 				pll->info->name);
4634 		I915_STATE_WARN(i915, pll->on != active,
4635 				"%s: pll on state mismatch (expected %i, found %i)\n",
4636 				pll->info->name, pll->on, active);
4637 	}
4638 
4639 	if (!crtc) {
4640 		I915_STATE_WARN(i915,
4641 				pll->active_mask & ~pll->state.pipe_mask,
4642 				"%s: more active pll users than references: 0x%x vs 0x%x\n",
4643 				pll->info->name, pll->active_mask, pll->state.pipe_mask);
4644 
4645 		return;
4646 	}
4647 
4648 	pipe_mask = BIT(crtc->pipe);
4649 
4650 	if (new_crtc_state->hw.active)
4651 		I915_STATE_WARN(i915, !(pll->active_mask & pipe_mask),
4652 				"%s: pll active mismatch (expected pipe %c in active mask 0x%x)\n",
4653 				pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
4654 	else
4655 		I915_STATE_WARN(i915, pll->active_mask & pipe_mask,
4656 				"%s: pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
4657 				pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
4658 
4659 	I915_STATE_WARN(i915, !(pll->state.pipe_mask & pipe_mask),
4660 			"%s: pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
4661 			pll->info->name, pipe_mask, pll->state.pipe_mask);
4662 
4663 	I915_STATE_WARN(i915,
4664 			pll->on && memcmp(&pll->state.hw_state, &dpll_hw_state,
4665 					  sizeof(dpll_hw_state)),
4666 			"%s: pll hw state mismatch\n",
4667 			pll->info->name);
4668 }
4669 
4670 static bool has_alt_port_dpll(const struct intel_shared_dpll *old_pll,
4671 			      const struct intel_shared_dpll *new_pll)
4672 {
4673 	return old_pll && new_pll && old_pll != new_pll &&
4674 		(old_pll->info->is_alt_port_dpll || new_pll->info->is_alt_port_dpll);
4675 }
4676 
4677 void intel_shared_dpll_state_verify(struct intel_atomic_state *state,
4678 				    struct intel_crtc *crtc)
4679 {
4680 	struct drm_i915_private *i915 = to_i915(state->base.dev);
4681 	const struct intel_crtc_state *old_crtc_state =
4682 		intel_atomic_get_old_crtc_state(state, crtc);
4683 	const struct intel_crtc_state *new_crtc_state =
4684 		intel_atomic_get_new_crtc_state(state, crtc);
4685 
4686 	if (new_crtc_state->shared_dpll)
4687 		verify_single_dpll_state(i915, new_crtc_state->shared_dpll,
4688 					 crtc, new_crtc_state);
4689 
4690 	if (old_crtc_state->shared_dpll &&
4691 	    old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
4692 		u8 pipe_mask = BIT(crtc->pipe);
4693 		struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
4694 
4695 		I915_STATE_WARN(i915, pll->active_mask & pipe_mask,
4696 				"%s: pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
4697 				pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
4698 
4699 		/* TC ports have both MG/TC and TBT PLL referenced simultaneously */
4700 		I915_STATE_WARN(i915, !has_alt_port_dpll(old_crtc_state->shared_dpll,
4701 							 new_crtc_state->shared_dpll) &&
4702 				pll->state.pipe_mask & pipe_mask,
4703 				"%s: pll enabled crtcs mismatch (found pipe %c in enabled mask (0x%x))\n",
4704 				pll->info->name, pipe_name(crtc->pipe), pll->state.pipe_mask);
4705 	}
4706 }
4707 
4708 void intel_shared_dpll_verify_disabled(struct intel_atomic_state *state)
4709 {
4710 	struct drm_i915_private *i915 = to_i915(state->base.dev);
4711 	struct intel_shared_dpll *pll;
4712 	int i;
4713 
4714 	for_each_shared_dpll(i915, pll, i)
4715 		verify_single_dpll_state(i915, pll, NULL, NULL);
4716 }
4717