xref: /linux/drivers/gpu/drm/i915/display/intel_dpll_mgr.c (revision be239684b18e1cdcafcf8c7face4a2f562c745ad)
1 /*
2  * Copyright © 2006-2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <linux/math.h>
25 #include <linux/string_helpers.h>
26 
27 #include "i915_reg.h"
28 #include "intel_de.h"
29 #include "intel_display_types.h"
30 #include "intel_dkl_phy.h"
31 #include "intel_dkl_phy_regs.h"
32 #include "intel_dpio_phy.h"
33 #include "intel_dpll.h"
34 #include "intel_dpll_mgr.h"
35 #include "intel_hti.h"
36 #include "intel_mg_phy_regs.h"
37 #include "intel_pch_refclk.h"
38 #include "intel_tc.h"
39 
40 /**
41  * DOC: Display PLLs
42  *
43  * Display PLLs used for driving outputs vary by platform. While some have
44  * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
45  * from a pool. In the latter scenario, it is possible that multiple pipes
46  * share a PLL if their configurations match.
47  *
48  * This file provides an abstraction over display PLLs. The function
49  * intel_shared_dpll_init() initializes the PLLs for the given platform.  The
50  * users of a PLL are tracked and that tracking is integrated with the atomic
51  * modset interface. During an atomic operation, required PLLs can be reserved
52  * for a given CRTC and encoder configuration by calling
53  * intel_reserve_shared_dplls() and previously reserved PLLs can be released
54  * with intel_release_shared_dplls().
55  * Changes to the users are first staged in the atomic state, and then made
56  * effective by calling intel_shared_dpll_swap_state() during the atomic
57  * commit phase.
58  */
59 
60 /* platform specific hooks for managing DPLLs */
61 struct intel_shared_dpll_funcs {
62 	/*
63 	 * Hook for enabling the pll, called from intel_enable_shared_dpll() if
64 	 * the pll is not already enabled.
65 	 */
66 	void (*enable)(struct drm_i915_private *i915,
67 		       struct intel_shared_dpll *pll);
68 
69 	/*
70 	 * Hook for disabling the pll, called from intel_disable_shared_dpll()
71 	 * only when it is safe to disable the pll, i.e., there are no more
72 	 * tracked users for it.
73 	 */
74 	void (*disable)(struct drm_i915_private *i915,
75 			struct intel_shared_dpll *pll);
76 
77 	/*
78 	 * Hook for reading the values currently programmed to the DPLL
79 	 * registers. This is used for initial hw state readout and state
80 	 * verification after a mode set.
81 	 */
82 	bool (*get_hw_state)(struct drm_i915_private *i915,
83 			     struct intel_shared_dpll *pll,
84 			     struct intel_dpll_hw_state *hw_state);
85 
86 	/*
87 	 * Hook for calculating the pll's output frequency based on its passed
88 	 * in state.
89 	 */
90 	int (*get_freq)(struct drm_i915_private *i915,
91 			const struct intel_shared_dpll *pll,
92 			const struct intel_dpll_hw_state *pll_state);
93 };
94 
95 struct intel_dpll_mgr {
96 	const struct dpll_info *dpll_info;
97 
98 	int (*compute_dplls)(struct intel_atomic_state *state,
99 			     struct intel_crtc *crtc,
100 			     struct intel_encoder *encoder);
101 	int (*get_dplls)(struct intel_atomic_state *state,
102 			 struct intel_crtc *crtc,
103 			 struct intel_encoder *encoder);
104 	void (*put_dplls)(struct intel_atomic_state *state,
105 			  struct intel_crtc *crtc);
106 	void (*update_active_dpll)(struct intel_atomic_state *state,
107 				   struct intel_crtc *crtc,
108 				   struct intel_encoder *encoder);
109 	void (*update_ref_clks)(struct drm_i915_private *i915);
110 	void (*dump_hw_state)(struct drm_i915_private *i915,
111 			      const struct intel_dpll_hw_state *hw_state);
112 	bool (*compare_hw_state)(const struct intel_dpll_hw_state *a,
113 				 const struct intel_dpll_hw_state *b);
114 };
115 
116 static void
117 intel_atomic_duplicate_dpll_state(struct drm_i915_private *i915,
118 				  struct intel_shared_dpll_state *shared_dpll)
119 {
120 	struct intel_shared_dpll *pll;
121 	int i;
122 
123 	/* Copy shared dpll state */
124 	for_each_shared_dpll(i915, pll, i)
125 		shared_dpll[pll->index] = pll->state;
126 }
127 
128 static struct intel_shared_dpll_state *
129 intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
130 {
131 	struct intel_atomic_state *state = to_intel_atomic_state(s);
132 
133 	drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
134 
135 	if (!state->dpll_set) {
136 		state->dpll_set = true;
137 
138 		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
139 						  state->shared_dpll);
140 	}
141 
142 	return state->shared_dpll;
143 }
144 
145 /**
146  * intel_get_shared_dpll_by_id - get a DPLL given its id
147  * @i915: i915 device instance
148  * @id: pll id
149  *
150  * Returns:
151  * A pointer to the DPLL with @id
152  */
153 struct intel_shared_dpll *
154 intel_get_shared_dpll_by_id(struct drm_i915_private *i915,
155 			    enum intel_dpll_id id)
156 {
157 	struct intel_shared_dpll *pll;
158 	int i;
159 
160 	for_each_shared_dpll(i915, pll, i) {
161 		if (pll->info->id == id)
162 			return pll;
163 	}
164 
165 	MISSING_CASE(id);
166 	return NULL;
167 }
168 
169 /* For ILK+ */
170 void assert_shared_dpll(struct drm_i915_private *i915,
171 			struct intel_shared_dpll *pll,
172 			bool state)
173 {
174 	bool cur_state;
175 	struct intel_dpll_hw_state hw_state;
176 
177 	if (drm_WARN(&i915->drm, !pll,
178 		     "asserting DPLL %s with no DPLL\n", str_on_off(state)))
179 		return;
180 
181 	cur_state = intel_dpll_get_hw_state(i915, pll, &hw_state);
182 	I915_STATE_WARN(i915, cur_state != state,
183 			"%s assertion failure (expected %s, current %s)\n",
184 			pll->info->name, str_on_off(state),
185 			str_on_off(cur_state));
186 }
187 
188 static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
189 {
190 	return TC_PORT_1 + id - DPLL_ID_ICL_MGPLL1;
191 }
192 
193 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
194 {
195 	return tc_port - TC_PORT_1 + DPLL_ID_ICL_MGPLL1;
196 }
197 
198 static i915_reg_t
199 intel_combo_pll_enable_reg(struct drm_i915_private *i915,
200 			   struct intel_shared_dpll *pll)
201 {
202 	if (IS_DG1(i915))
203 		return DG1_DPLL_ENABLE(pll->info->id);
204 	else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
205 		 (pll->info->id == DPLL_ID_EHL_DPLL4))
206 		return MG_PLL_ENABLE(0);
207 
208 	return ICL_DPLL_ENABLE(pll->info->id);
209 }
210 
211 static i915_reg_t
212 intel_tc_pll_enable_reg(struct drm_i915_private *i915,
213 			struct intel_shared_dpll *pll)
214 {
215 	const enum intel_dpll_id id = pll->info->id;
216 	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
217 
218 	if (IS_ALDERLAKE_P(i915))
219 		return ADLP_PORTTC_PLL_ENABLE(tc_port);
220 
221 	return MG_PLL_ENABLE(tc_port);
222 }
223 
224 static void _intel_enable_shared_dpll(struct drm_i915_private *i915,
225 				      struct intel_shared_dpll *pll)
226 {
227 	if (pll->info->power_domain)
228 		pll->wakeref = intel_display_power_get(i915, pll->info->power_domain);
229 
230 	pll->info->funcs->enable(i915, pll);
231 	pll->on = true;
232 }
233 
234 static void _intel_disable_shared_dpll(struct drm_i915_private *i915,
235 				       struct intel_shared_dpll *pll)
236 {
237 	pll->info->funcs->disable(i915, pll);
238 	pll->on = false;
239 
240 	if (pll->info->power_domain)
241 		intel_display_power_put(i915, pll->info->power_domain, pll->wakeref);
242 }
243 
244 /**
245  * intel_enable_shared_dpll - enable a CRTC's shared DPLL
246  * @crtc_state: CRTC, and its state, which has a shared DPLL
247  *
248  * Enable the shared DPLL used by @crtc.
249  */
250 void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
251 {
252 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
253 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
254 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
255 	unsigned int pipe_mask = BIT(crtc->pipe);
256 	unsigned int old_mask;
257 
258 	if (drm_WARN_ON(&i915->drm, pll == NULL))
259 		return;
260 
261 	mutex_lock(&i915->display.dpll.lock);
262 	old_mask = pll->active_mask;
263 
264 	if (drm_WARN_ON(&i915->drm, !(pll->state.pipe_mask & pipe_mask)) ||
265 	    drm_WARN_ON(&i915->drm, pll->active_mask & pipe_mask))
266 		goto out;
267 
268 	pll->active_mask |= pipe_mask;
269 
270 	drm_dbg_kms(&i915->drm,
271 		    "enable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
272 		    pll->info->name, pll->active_mask, pll->on,
273 		    crtc->base.base.id, crtc->base.name);
274 
275 	if (old_mask) {
276 		drm_WARN_ON(&i915->drm, !pll->on);
277 		assert_shared_dpll_enabled(i915, pll);
278 		goto out;
279 	}
280 	drm_WARN_ON(&i915->drm, pll->on);
281 
282 	drm_dbg_kms(&i915->drm, "enabling %s\n", pll->info->name);
283 
284 	_intel_enable_shared_dpll(i915, pll);
285 
286 out:
287 	mutex_unlock(&i915->display.dpll.lock);
288 }
289 
290 /**
291  * intel_disable_shared_dpll - disable a CRTC's shared DPLL
292  * @crtc_state: CRTC, and its state, which has a shared DPLL
293  *
294  * Disable the shared DPLL used by @crtc.
295  */
296 void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
297 {
298 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
299 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
300 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
301 	unsigned int pipe_mask = BIT(crtc->pipe);
302 
303 	/* PCH only available on ILK+ */
304 	if (DISPLAY_VER(i915) < 5)
305 		return;
306 
307 	if (pll == NULL)
308 		return;
309 
310 	mutex_lock(&i915->display.dpll.lock);
311 	if (drm_WARN(&i915->drm, !(pll->active_mask & pipe_mask),
312 		     "%s not used by [CRTC:%d:%s]\n", pll->info->name,
313 		     crtc->base.base.id, crtc->base.name))
314 		goto out;
315 
316 	drm_dbg_kms(&i915->drm,
317 		    "disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
318 		    pll->info->name, pll->active_mask, pll->on,
319 		    crtc->base.base.id, crtc->base.name);
320 
321 	assert_shared_dpll_enabled(i915, pll);
322 	drm_WARN_ON(&i915->drm, !pll->on);
323 
324 	pll->active_mask &= ~pipe_mask;
325 	if (pll->active_mask)
326 		goto out;
327 
328 	drm_dbg_kms(&i915->drm, "disabling %s\n", pll->info->name);
329 
330 	_intel_disable_shared_dpll(i915, pll);
331 
332 out:
333 	mutex_unlock(&i915->display.dpll.lock);
334 }
335 
336 static unsigned long
337 intel_dpll_mask_all(struct drm_i915_private *i915)
338 {
339 	struct intel_shared_dpll *pll;
340 	unsigned long dpll_mask = 0;
341 	int i;
342 
343 	for_each_shared_dpll(i915, pll, i) {
344 		drm_WARN_ON(&i915->drm, dpll_mask & BIT(pll->info->id));
345 
346 		dpll_mask |= BIT(pll->info->id);
347 	}
348 
349 	return dpll_mask;
350 }
351 
352 static struct intel_shared_dpll *
353 intel_find_shared_dpll(struct intel_atomic_state *state,
354 		       const struct intel_crtc *crtc,
355 		       const struct intel_dpll_hw_state *pll_state,
356 		       unsigned long dpll_mask)
357 {
358 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
359 	unsigned long dpll_mask_all = intel_dpll_mask_all(i915);
360 	struct intel_shared_dpll_state *shared_dpll;
361 	struct intel_shared_dpll *unused_pll = NULL;
362 	enum intel_dpll_id id;
363 
364 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
365 
366 	drm_WARN_ON(&i915->drm, dpll_mask & ~dpll_mask_all);
367 
368 	for_each_set_bit(id, &dpll_mask, fls(dpll_mask_all)) {
369 		struct intel_shared_dpll *pll;
370 
371 		pll = intel_get_shared_dpll_by_id(i915, id);
372 		if (!pll)
373 			continue;
374 
375 		/* Only want to check enabled timings first */
376 		if (shared_dpll[pll->index].pipe_mask == 0) {
377 			if (!unused_pll)
378 				unused_pll = pll;
379 			continue;
380 		}
381 
382 		if (memcmp(pll_state,
383 			   &shared_dpll[pll->index].hw_state,
384 			   sizeof(*pll_state)) == 0) {
385 			drm_dbg_kms(&i915->drm,
386 				    "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
387 				    crtc->base.base.id, crtc->base.name,
388 				    pll->info->name,
389 				    shared_dpll[pll->index].pipe_mask,
390 				    pll->active_mask);
391 			return pll;
392 		}
393 	}
394 
395 	/* Ok no matching timings, maybe there's a free one? */
396 	if (unused_pll) {
397 		drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] allocated %s\n",
398 			    crtc->base.base.id, crtc->base.name,
399 			    unused_pll->info->name);
400 		return unused_pll;
401 	}
402 
403 	return NULL;
404 }
405 
406 /**
407  * intel_reference_shared_dpll_crtc - Get a DPLL reference for a CRTC
408  * @crtc: CRTC on which behalf the reference is taken
409  * @pll: DPLL for which the reference is taken
410  * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
411  *
412  * Take a reference for @pll tracking the use of it by @crtc.
413  */
414 static void
415 intel_reference_shared_dpll_crtc(const struct intel_crtc *crtc,
416 				 const struct intel_shared_dpll *pll,
417 				 struct intel_shared_dpll_state *shared_dpll_state)
418 {
419 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
420 
421 	drm_WARN_ON(&i915->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) != 0);
422 
423 	shared_dpll_state->pipe_mask |= BIT(crtc->pipe);
424 
425 	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] reserving %s\n",
426 		    crtc->base.base.id, crtc->base.name, pll->info->name);
427 }
428 
429 static void
430 intel_reference_shared_dpll(struct intel_atomic_state *state,
431 			    const struct intel_crtc *crtc,
432 			    const struct intel_shared_dpll *pll,
433 			    const struct intel_dpll_hw_state *pll_state)
434 {
435 	struct intel_shared_dpll_state *shared_dpll;
436 
437 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
438 
439 	if (shared_dpll[pll->index].pipe_mask == 0)
440 		shared_dpll[pll->index].hw_state = *pll_state;
441 
442 	intel_reference_shared_dpll_crtc(crtc, pll, &shared_dpll[pll->index]);
443 }
444 
445 /**
446  * intel_unreference_shared_dpll_crtc - Drop a DPLL reference for a CRTC
447  * @crtc: CRTC on which behalf the reference is dropped
448  * @pll: DPLL for which the reference is dropped
449  * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
450  *
451  * Drop a reference for @pll tracking the end of use of it by @crtc.
452  */
453 void
454 intel_unreference_shared_dpll_crtc(const struct intel_crtc *crtc,
455 				   const struct intel_shared_dpll *pll,
456 				   struct intel_shared_dpll_state *shared_dpll_state)
457 {
458 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
459 
460 	drm_WARN_ON(&i915->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) == 0);
461 
462 	shared_dpll_state->pipe_mask &= ~BIT(crtc->pipe);
463 
464 	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] releasing %s\n",
465 		    crtc->base.base.id, crtc->base.name, pll->info->name);
466 }
467 
468 static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
469 					  const struct intel_crtc *crtc,
470 					  const struct intel_shared_dpll *pll)
471 {
472 	struct intel_shared_dpll_state *shared_dpll;
473 
474 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
475 
476 	intel_unreference_shared_dpll_crtc(crtc, pll, &shared_dpll[pll->index]);
477 }
478 
479 static void intel_put_dpll(struct intel_atomic_state *state,
480 			   struct intel_crtc *crtc)
481 {
482 	const struct intel_crtc_state *old_crtc_state =
483 		intel_atomic_get_old_crtc_state(state, crtc);
484 	struct intel_crtc_state *new_crtc_state =
485 		intel_atomic_get_new_crtc_state(state, crtc);
486 
487 	new_crtc_state->shared_dpll = NULL;
488 
489 	if (!old_crtc_state->shared_dpll)
490 		return;
491 
492 	intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
493 }
494 
495 /**
496  * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
497  * @state: atomic state
498  *
499  * This is the dpll version of drm_atomic_helper_swap_state() since the
500  * helper does not handle driver-specific global state.
501  *
502  * For consistency with atomic helpers this function does a complete swap,
503  * i.e. it also puts the current state into @state, even though there is no
504  * need for that at this moment.
505  */
506 void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
507 {
508 	struct drm_i915_private *i915 = to_i915(state->base.dev);
509 	struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
510 	struct intel_shared_dpll *pll;
511 	int i;
512 
513 	if (!state->dpll_set)
514 		return;
515 
516 	for_each_shared_dpll(i915, pll, i)
517 		swap(pll->state, shared_dpll[pll->index]);
518 }
519 
520 static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *i915,
521 				      struct intel_shared_dpll *pll,
522 				      struct intel_dpll_hw_state *hw_state)
523 {
524 	const enum intel_dpll_id id = pll->info->id;
525 	intel_wakeref_t wakeref;
526 	u32 val;
527 
528 	wakeref = intel_display_power_get_if_enabled(i915,
529 						     POWER_DOMAIN_DISPLAY_CORE);
530 	if (!wakeref)
531 		return false;
532 
533 	val = intel_de_read(i915, PCH_DPLL(id));
534 	hw_state->dpll = val;
535 	hw_state->fp0 = intel_de_read(i915, PCH_FP0(id));
536 	hw_state->fp1 = intel_de_read(i915, PCH_FP1(id));
537 
538 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
539 
540 	return val & DPLL_VCO_ENABLE;
541 }
542 
543 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *i915)
544 {
545 	u32 val;
546 	bool enabled;
547 
548 	val = intel_de_read(i915, PCH_DREF_CONTROL);
549 	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
550 			    DREF_SUPERSPREAD_SOURCE_MASK));
551 	I915_STATE_WARN(i915, !enabled,
552 			"PCH refclk assertion failure, should be active but is disabled\n");
553 }
554 
555 static void ibx_pch_dpll_enable(struct drm_i915_private *i915,
556 				struct intel_shared_dpll *pll)
557 {
558 	const enum intel_dpll_id id = pll->info->id;
559 
560 	/* PCH refclock must be enabled first */
561 	ibx_assert_pch_refclk_enabled(i915);
562 
563 	intel_de_write(i915, PCH_FP0(id), pll->state.hw_state.fp0);
564 	intel_de_write(i915, PCH_FP1(id), pll->state.hw_state.fp1);
565 
566 	intel_de_write(i915, PCH_DPLL(id), pll->state.hw_state.dpll);
567 
568 	/* Wait for the clocks to stabilize. */
569 	intel_de_posting_read(i915, PCH_DPLL(id));
570 	udelay(150);
571 
572 	/* The pixel multiplier can only be updated once the
573 	 * DPLL is enabled and the clocks are stable.
574 	 *
575 	 * So write it again.
576 	 */
577 	intel_de_write(i915, PCH_DPLL(id), pll->state.hw_state.dpll);
578 	intel_de_posting_read(i915, PCH_DPLL(id));
579 	udelay(200);
580 }
581 
582 static void ibx_pch_dpll_disable(struct drm_i915_private *i915,
583 				 struct intel_shared_dpll *pll)
584 {
585 	const enum intel_dpll_id id = pll->info->id;
586 
587 	intel_de_write(i915, PCH_DPLL(id), 0);
588 	intel_de_posting_read(i915, PCH_DPLL(id));
589 	udelay(200);
590 }
591 
592 static int ibx_compute_dpll(struct intel_atomic_state *state,
593 			    struct intel_crtc *crtc,
594 			    struct intel_encoder *encoder)
595 {
596 	return 0;
597 }
598 
599 static int ibx_get_dpll(struct intel_atomic_state *state,
600 			struct intel_crtc *crtc,
601 			struct intel_encoder *encoder)
602 {
603 	struct intel_crtc_state *crtc_state =
604 		intel_atomic_get_new_crtc_state(state, crtc);
605 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
606 	struct intel_shared_dpll *pll;
607 	enum intel_dpll_id id;
608 
609 	if (HAS_PCH_IBX(i915)) {
610 		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
611 		id = (enum intel_dpll_id) crtc->pipe;
612 		pll = intel_get_shared_dpll_by_id(i915, id);
613 
614 		drm_dbg_kms(&i915->drm,
615 			    "[CRTC:%d:%s] using pre-allocated %s\n",
616 			    crtc->base.base.id, crtc->base.name,
617 			    pll->info->name);
618 	} else {
619 		pll = intel_find_shared_dpll(state, crtc,
620 					     &crtc_state->dpll_hw_state,
621 					     BIT(DPLL_ID_PCH_PLL_B) |
622 					     BIT(DPLL_ID_PCH_PLL_A));
623 	}
624 
625 	if (!pll)
626 		return -EINVAL;
627 
628 	/* reference the pll */
629 	intel_reference_shared_dpll(state, crtc,
630 				    pll, &crtc_state->dpll_hw_state);
631 
632 	crtc_state->shared_dpll = pll;
633 
634 	return 0;
635 }
636 
637 static void ibx_dump_hw_state(struct drm_i915_private *i915,
638 			      const struct intel_dpll_hw_state *hw_state)
639 {
640 	drm_dbg_kms(&i915->drm,
641 		    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
642 		    "fp0: 0x%x, fp1: 0x%x\n",
643 		    hw_state->dpll,
644 		    hw_state->dpll_md,
645 		    hw_state->fp0,
646 		    hw_state->fp1);
647 }
648 
649 static bool ibx_compare_hw_state(const struct intel_dpll_hw_state *a,
650 				 const struct intel_dpll_hw_state *b)
651 {
652 	return a->dpll == b->dpll &&
653 		a->dpll_md == b->dpll_md &&
654 		a->fp0 == b->fp0 &&
655 		a->fp1 == b->fp1;
656 }
657 
658 static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
659 	.enable = ibx_pch_dpll_enable,
660 	.disable = ibx_pch_dpll_disable,
661 	.get_hw_state = ibx_pch_dpll_get_hw_state,
662 };
663 
664 static const struct dpll_info pch_plls[] = {
665 	{ .name = "PCH DPLL A", .funcs = &ibx_pch_dpll_funcs, .id = DPLL_ID_PCH_PLL_A, },
666 	{ .name = "PCH DPLL B", .funcs = &ibx_pch_dpll_funcs, .id = DPLL_ID_PCH_PLL_B, },
667 	{}
668 };
669 
670 static const struct intel_dpll_mgr pch_pll_mgr = {
671 	.dpll_info = pch_plls,
672 	.compute_dplls = ibx_compute_dpll,
673 	.get_dplls = ibx_get_dpll,
674 	.put_dplls = intel_put_dpll,
675 	.dump_hw_state = ibx_dump_hw_state,
676 	.compare_hw_state = ibx_compare_hw_state,
677 };
678 
679 static void hsw_ddi_wrpll_enable(struct drm_i915_private *i915,
680 				 struct intel_shared_dpll *pll)
681 {
682 	const enum intel_dpll_id id = pll->info->id;
683 
684 	intel_de_write(i915, WRPLL_CTL(id), pll->state.hw_state.wrpll);
685 	intel_de_posting_read(i915, WRPLL_CTL(id));
686 	udelay(20);
687 }
688 
689 static void hsw_ddi_spll_enable(struct drm_i915_private *i915,
690 				struct intel_shared_dpll *pll)
691 {
692 	intel_de_write(i915, SPLL_CTL, pll->state.hw_state.spll);
693 	intel_de_posting_read(i915, SPLL_CTL);
694 	udelay(20);
695 }
696 
697 static void hsw_ddi_wrpll_disable(struct drm_i915_private *i915,
698 				  struct intel_shared_dpll *pll)
699 {
700 	const enum intel_dpll_id id = pll->info->id;
701 
702 	intel_de_rmw(i915, WRPLL_CTL(id), WRPLL_PLL_ENABLE, 0);
703 	intel_de_posting_read(i915, WRPLL_CTL(id));
704 
705 	/*
706 	 * Try to set up the PCH reference clock once all DPLLs
707 	 * that depend on it have been shut down.
708 	 */
709 	if (i915->display.dpll.pch_ssc_use & BIT(id))
710 		intel_init_pch_refclk(i915);
711 }
712 
713 static void hsw_ddi_spll_disable(struct drm_i915_private *i915,
714 				 struct intel_shared_dpll *pll)
715 {
716 	enum intel_dpll_id id = pll->info->id;
717 
718 	intel_de_rmw(i915, SPLL_CTL, SPLL_PLL_ENABLE, 0);
719 	intel_de_posting_read(i915, SPLL_CTL);
720 
721 	/*
722 	 * Try to set up the PCH reference clock once all DPLLs
723 	 * that depend on it have been shut down.
724 	 */
725 	if (i915->display.dpll.pch_ssc_use & BIT(id))
726 		intel_init_pch_refclk(i915);
727 }
728 
729 static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *i915,
730 				       struct intel_shared_dpll *pll,
731 				       struct intel_dpll_hw_state *hw_state)
732 {
733 	const enum intel_dpll_id id = pll->info->id;
734 	intel_wakeref_t wakeref;
735 	u32 val;
736 
737 	wakeref = intel_display_power_get_if_enabled(i915,
738 						     POWER_DOMAIN_DISPLAY_CORE);
739 	if (!wakeref)
740 		return false;
741 
742 	val = intel_de_read(i915, WRPLL_CTL(id));
743 	hw_state->wrpll = val;
744 
745 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
746 
747 	return val & WRPLL_PLL_ENABLE;
748 }
749 
750 static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *i915,
751 				      struct intel_shared_dpll *pll,
752 				      struct intel_dpll_hw_state *hw_state)
753 {
754 	intel_wakeref_t wakeref;
755 	u32 val;
756 
757 	wakeref = intel_display_power_get_if_enabled(i915,
758 						     POWER_DOMAIN_DISPLAY_CORE);
759 	if (!wakeref)
760 		return false;
761 
762 	val = intel_de_read(i915, SPLL_CTL);
763 	hw_state->spll = val;
764 
765 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
766 
767 	return val & SPLL_PLL_ENABLE;
768 }
769 
770 #define LC_FREQ 2700
771 #define LC_FREQ_2K U64_C(LC_FREQ * 2000)
772 
773 #define P_MIN 2
774 #define P_MAX 64
775 #define P_INC 2
776 
777 /* Constraints for PLL good behavior */
778 #define REF_MIN 48
779 #define REF_MAX 400
780 #define VCO_MIN 2400
781 #define VCO_MAX 4800
782 
783 struct hsw_wrpll_rnp {
784 	unsigned p, n2, r2;
785 };
786 
787 static unsigned hsw_wrpll_get_budget_for_freq(int clock)
788 {
789 	switch (clock) {
790 	case 25175000:
791 	case 25200000:
792 	case 27000000:
793 	case 27027000:
794 	case 37762500:
795 	case 37800000:
796 	case 40500000:
797 	case 40541000:
798 	case 54000000:
799 	case 54054000:
800 	case 59341000:
801 	case 59400000:
802 	case 72000000:
803 	case 74176000:
804 	case 74250000:
805 	case 81000000:
806 	case 81081000:
807 	case 89012000:
808 	case 89100000:
809 	case 108000000:
810 	case 108108000:
811 	case 111264000:
812 	case 111375000:
813 	case 148352000:
814 	case 148500000:
815 	case 162000000:
816 	case 162162000:
817 	case 222525000:
818 	case 222750000:
819 	case 296703000:
820 	case 297000000:
821 		return 0;
822 	case 233500000:
823 	case 245250000:
824 	case 247750000:
825 	case 253250000:
826 	case 298000000:
827 		return 1500;
828 	case 169128000:
829 	case 169500000:
830 	case 179500000:
831 	case 202000000:
832 		return 2000;
833 	case 256250000:
834 	case 262500000:
835 	case 270000000:
836 	case 272500000:
837 	case 273750000:
838 	case 280750000:
839 	case 281250000:
840 	case 286000000:
841 	case 291750000:
842 		return 4000;
843 	case 267250000:
844 	case 268500000:
845 		return 5000;
846 	default:
847 		return 1000;
848 	}
849 }
850 
851 static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
852 				 unsigned int r2, unsigned int n2,
853 				 unsigned int p,
854 				 struct hsw_wrpll_rnp *best)
855 {
856 	u64 a, b, c, d, diff, diff_best;
857 
858 	/* No best (r,n,p) yet */
859 	if (best->p == 0) {
860 		best->p = p;
861 		best->n2 = n2;
862 		best->r2 = r2;
863 		return;
864 	}
865 
866 	/*
867 	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
868 	 * freq2k.
869 	 *
870 	 * delta = 1e6 *
871 	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
872 	 *	   freq2k;
873 	 *
874 	 * and we would like delta <= budget.
875 	 *
876 	 * If the discrepancy is above the PPM-based budget, always prefer to
877 	 * improve upon the previous solution.  However, if you're within the
878 	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
879 	 */
880 	a = freq2k * budget * p * r2;
881 	b = freq2k * budget * best->p * best->r2;
882 	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
883 	diff_best = abs_diff(freq2k * best->p * best->r2,
884 			     LC_FREQ_2K * best->n2);
885 	c = 1000000 * diff;
886 	d = 1000000 * diff_best;
887 
888 	if (a < c && b < d) {
889 		/* If both are above the budget, pick the closer */
890 		if (best->p * best->r2 * diff < p * r2 * diff_best) {
891 			best->p = p;
892 			best->n2 = n2;
893 			best->r2 = r2;
894 		}
895 	} else if (a >= c && b < d) {
896 		/* If A is below the threshold but B is above it?  Update. */
897 		best->p = p;
898 		best->n2 = n2;
899 		best->r2 = r2;
900 	} else if (a >= c && b >= d) {
901 		/* Both are below the limit, so pick the higher n2/(r2*r2) */
902 		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
903 			best->p = p;
904 			best->n2 = n2;
905 			best->r2 = r2;
906 		}
907 	}
908 	/* Otherwise a < c && b >= d, do nothing */
909 }
910 
911 static void
912 hsw_ddi_calculate_wrpll(int clock /* in Hz */,
913 			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
914 {
915 	u64 freq2k;
916 	unsigned p, n2, r2;
917 	struct hsw_wrpll_rnp best = {};
918 	unsigned budget;
919 
920 	freq2k = clock / 100;
921 
922 	budget = hsw_wrpll_get_budget_for_freq(clock);
923 
924 	/* Special case handling for 540 pixel clock: bypass WR PLL entirely
925 	 * and directly pass the LC PLL to it. */
926 	if (freq2k == 5400000) {
927 		*n2_out = 2;
928 		*p_out = 1;
929 		*r2_out = 2;
930 		return;
931 	}
932 
933 	/*
934 	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
935 	 * the WR PLL.
936 	 *
937 	 * We want R so that REF_MIN <= Ref <= REF_MAX.
938 	 * Injecting R2 = 2 * R gives:
939 	 *   REF_MAX * r2 > LC_FREQ * 2 and
940 	 *   REF_MIN * r2 < LC_FREQ * 2
941 	 *
942 	 * Which means the desired boundaries for r2 are:
943 	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
944 	 *
945 	 */
946 	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
947 	     r2 <= LC_FREQ * 2 / REF_MIN;
948 	     r2++) {
949 
950 		/*
951 		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
952 		 *
953 		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
954 		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
955 		 *   VCO_MAX * r2 > n2 * LC_FREQ and
956 		 *   VCO_MIN * r2 < n2 * LC_FREQ)
957 		 *
958 		 * Which means the desired boundaries for n2 are:
959 		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
960 		 */
961 		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
962 		     n2 <= VCO_MAX * r2 / LC_FREQ;
963 		     n2++) {
964 
965 			for (p = P_MIN; p <= P_MAX; p += P_INC)
966 				hsw_wrpll_update_rnp(freq2k, budget,
967 						     r2, n2, p, &best);
968 		}
969 	}
970 
971 	*n2_out = best.n2;
972 	*p_out = best.p;
973 	*r2_out = best.r2;
974 }
975 
976 static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *i915,
977 				  const struct intel_shared_dpll *pll,
978 				  const struct intel_dpll_hw_state *pll_state)
979 {
980 	int refclk;
981 	int n, p, r;
982 	u32 wrpll = pll_state->wrpll;
983 
984 	switch (wrpll & WRPLL_REF_MASK) {
985 	case WRPLL_REF_SPECIAL_HSW:
986 		/* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
987 		if (IS_HASWELL(i915) && !IS_HASWELL_ULT(i915)) {
988 			refclk = i915->display.dpll.ref_clks.nssc;
989 			break;
990 		}
991 		fallthrough;
992 	case WRPLL_REF_PCH_SSC:
993 		/*
994 		 * We could calculate spread here, but our checking
995 		 * code only cares about 5% accuracy, and spread is a max of
996 		 * 0.5% downspread.
997 		 */
998 		refclk = i915->display.dpll.ref_clks.ssc;
999 		break;
1000 	case WRPLL_REF_LCPLL:
1001 		refclk = 2700000;
1002 		break;
1003 	default:
1004 		MISSING_CASE(wrpll);
1005 		return 0;
1006 	}
1007 
1008 	r = wrpll & WRPLL_DIVIDER_REF_MASK;
1009 	p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
1010 	n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;
1011 
1012 	/* Convert to KHz, p & r have a fixed point portion */
1013 	return (refclk * n / 10) / (p * r) * 2;
1014 }
1015 
1016 static int
1017 hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state,
1018 			   struct intel_crtc *crtc)
1019 {
1020 	struct drm_i915_private *i915 = to_i915(state->base.dev);
1021 	struct intel_crtc_state *crtc_state =
1022 		intel_atomic_get_new_crtc_state(state, crtc);
1023 	unsigned int p, n2, r2;
1024 
1025 	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
1026 
1027 	crtc_state->dpll_hw_state.wrpll =
1028 		WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
1029 		WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
1030 		WRPLL_DIVIDER_POST(p);
1031 
1032 	crtc_state->port_clock = hsw_ddi_wrpll_get_freq(i915, NULL,
1033 							&crtc_state->dpll_hw_state);
1034 
1035 	return 0;
1036 }
1037 
1038 static struct intel_shared_dpll *
1039 hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
1040 		       struct intel_crtc *crtc)
1041 {
1042 	struct intel_crtc_state *crtc_state =
1043 		intel_atomic_get_new_crtc_state(state, crtc);
1044 
1045 	return intel_find_shared_dpll(state, crtc,
1046 				      &crtc_state->dpll_hw_state,
1047 				      BIT(DPLL_ID_WRPLL2) |
1048 				      BIT(DPLL_ID_WRPLL1));
1049 }
1050 
1051 static int
1052 hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state)
1053 {
1054 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1055 	int clock = crtc_state->port_clock;
1056 
1057 	switch (clock / 2) {
1058 	case 81000:
1059 	case 135000:
1060 	case 270000:
1061 		return 0;
1062 	default:
1063 		drm_dbg_kms(&i915->drm, "Invalid clock for DP: %d\n",
1064 			    clock);
1065 		return -EINVAL;
1066 	}
1067 }
1068 
1069 static struct intel_shared_dpll *
1070 hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
1071 {
1072 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1073 	struct intel_shared_dpll *pll;
1074 	enum intel_dpll_id pll_id;
1075 	int clock = crtc_state->port_clock;
1076 
1077 	switch (clock / 2) {
1078 	case 81000:
1079 		pll_id = DPLL_ID_LCPLL_810;
1080 		break;
1081 	case 135000:
1082 		pll_id = DPLL_ID_LCPLL_1350;
1083 		break;
1084 	case 270000:
1085 		pll_id = DPLL_ID_LCPLL_2700;
1086 		break;
1087 	default:
1088 		MISSING_CASE(clock / 2);
1089 		return NULL;
1090 	}
1091 
1092 	pll = intel_get_shared_dpll_by_id(i915, pll_id);
1093 
1094 	if (!pll)
1095 		return NULL;
1096 
1097 	return pll;
1098 }
1099 
1100 static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1101 				  const struct intel_shared_dpll *pll,
1102 				  const struct intel_dpll_hw_state *pll_state)
1103 {
1104 	int link_clock = 0;
1105 
1106 	switch (pll->info->id) {
1107 	case DPLL_ID_LCPLL_810:
1108 		link_clock = 81000;
1109 		break;
1110 	case DPLL_ID_LCPLL_1350:
1111 		link_clock = 135000;
1112 		break;
1113 	case DPLL_ID_LCPLL_2700:
1114 		link_clock = 270000;
1115 		break;
1116 	default:
1117 		drm_WARN(&i915->drm, 1, "bad port clock sel\n");
1118 		break;
1119 	}
1120 
1121 	return link_clock * 2;
1122 }
1123 
1124 static int
1125 hsw_ddi_spll_compute_dpll(struct intel_atomic_state *state,
1126 			  struct intel_crtc *crtc)
1127 {
1128 	struct intel_crtc_state *crtc_state =
1129 		intel_atomic_get_new_crtc_state(state, crtc);
1130 
1131 	if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
1132 		return -EINVAL;
1133 
1134 	crtc_state->dpll_hw_state.spll =
1135 		SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
1136 
1137 	return 0;
1138 }
1139 
1140 static struct intel_shared_dpll *
1141 hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
1142 		      struct intel_crtc *crtc)
1143 {
1144 	struct intel_crtc_state *crtc_state =
1145 		intel_atomic_get_new_crtc_state(state, crtc);
1146 
1147 	return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
1148 				      BIT(DPLL_ID_SPLL));
1149 }
1150 
1151 static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
1152 				 const struct intel_shared_dpll *pll,
1153 				 const struct intel_dpll_hw_state *pll_state)
1154 {
1155 	int link_clock = 0;
1156 
1157 	switch (pll_state->spll & SPLL_FREQ_MASK) {
1158 	case SPLL_FREQ_810MHz:
1159 		link_clock = 81000;
1160 		break;
1161 	case SPLL_FREQ_1350MHz:
1162 		link_clock = 135000;
1163 		break;
1164 	case SPLL_FREQ_2700MHz:
1165 		link_clock = 270000;
1166 		break;
1167 	default:
1168 		drm_WARN(&i915->drm, 1, "bad spll freq\n");
1169 		break;
1170 	}
1171 
1172 	return link_clock * 2;
1173 }
1174 
1175 static int hsw_compute_dpll(struct intel_atomic_state *state,
1176 			    struct intel_crtc *crtc,
1177 			    struct intel_encoder *encoder)
1178 {
1179 	struct intel_crtc_state *crtc_state =
1180 		intel_atomic_get_new_crtc_state(state, crtc);
1181 
1182 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1183 		return hsw_ddi_wrpll_compute_dpll(state, crtc);
1184 	else if (intel_crtc_has_dp_encoder(crtc_state))
1185 		return hsw_ddi_lcpll_compute_dpll(crtc_state);
1186 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1187 		return hsw_ddi_spll_compute_dpll(state, crtc);
1188 	else
1189 		return -EINVAL;
1190 }
1191 
1192 static int hsw_get_dpll(struct intel_atomic_state *state,
1193 			struct intel_crtc *crtc,
1194 			struct intel_encoder *encoder)
1195 {
1196 	struct intel_crtc_state *crtc_state =
1197 		intel_atomic_get_new_crtc_state(state, crtc);
1198 	struct intel_shared_dpll *pll = NULL;
1199 
1200 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1201 		pll = hsw_ddi_wrpll_get_dpll(state, crtc);
1202 	else if (intel_crtc_has_dp_encoder(crtc_state))
1203 		pll = hsw_ddi_lcpll_get_dpll(crtc_state);
1204 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1205 		pll = hsw_ddi_spll_get_dpll(state, crtc);
1206 
1207 	if (!pll)
1208 		return -EINVAL;
1209 
1210 	intel_reference_shared_dpll(state, crtc,
1211 				    pll, &crtc_state->dpll_hw_state);
1212 
1213 	crtc_state->shared_dpll = pll;
1214 
1215 	return 0;
1216 }
1217 
1218 static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
1219 {
1220 	i915->display.dpll.ref_clks.ssc = 135000;
1221 	/* Non-SSC is only used on non-ULT HSW. */
1222 	if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
1223 		i915->display.dpll.ref_clks.nssc = 24000;
1224 	else
1225 		i915->display.dpll.ref_clks.nssc = 135000;
1226 }
1227 
1228 static void hsw_dump_hw_state(struct drm_i915_private *i915,
1229 			      const struct intel_dpll_hw_state *hw_state)
1230 {
1231 	drm_dbg_kms(&i915->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
1232 		    hw_state->wrpll, hw_state->spll);
1233 }
1234 
1235 static bool hsw_compare_hw_state(const struct intel_dpll_hw_state *a,
1236 				 const struct intel_dpll_hw_state *b)
1237 {
1238 	return a->wrpll == b->wrpll &&
1239 		a->spll == b->spll;
1240 }
1241 
1242 static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
1243 	.enable = hsw_ddi_wrpll_enable,
1244 	.disable = hsw_ddi_wrpll_disable,
1245 	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
1246 	.get_freq = hsw_ddi_wrpll_get_freq,
1247 };
1248 
1249 static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
1250 	.enable = hsw_ddi_spll_enable,
1251 	.disable = hsw_ddi_spll_disable,
1252 	.get_hw_state = hsw_ddi_spll_get_hw_state,
1253 	.get_freq = hsw_ddi_spll_get_freq,
1254 };
1255 
1256 static void hsw_ddi_lcpll_enable(struct drm_i915_private *i915,
1257 				 struct intel_shared_dpll *pll)
1258 {
1259 }
1260 
1261 static void hsw_ddi_lcpll_disable(struct drm_i915_private *i915,
1262 				  struct intel_shared_dpll *pll)
1263 {
1264 }
1265 
1266 static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *i915,
1267 				       struct intel_shared_dpll *pll,
1268 				       struct intel_dpll_hw_state *hw_state)
1269 {
1270 	return true;
1271 }
1272 
1273 static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
1274 	.enable = hsw_ddi_lcpll_enable,
1275 	.disable = hsw_ddi_lcpll_disable,
1276 	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
1277 	.get_freq = hsw_ddi_lcpll_get_freq,
1278 };
1279 
1280 static const struct dpll_info hsw_plls[] = {
1281 	{ .name = "WRPLL 1", .funcs = &hsw_ddi_wrpll_funcs, .id = DPLL_ID_WRPLL1, },
1282 	{ .name = "WRPLL 2", .funcs = &hsw_ddi_wrpll_funcs, .id = DPLL_ID_WRPLL2, },
1283 	{ .name = "SPLL", .funcs = &hsw_ddi_spll_funcs, .id = DPLL_ID_SPLL, },
1284 	{ .name = "LCPLL 810", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_810,
1285 	  .always_on = true, },
1286 	{ .name = "LCPLL 1350", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_1350,
1287 	  .always_on = true, },
1288 	{ .name = "LCPLL 2700", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_2700,
1289 	  .always_on = true, },
1290 	{}
1291 };
1292 
1293 static const struct intel_dpll_mgr hsw_pll_mgr = {
1294 	.dpll_info = hsw_plls,
1295 	.compute_dplls = hsw_compute_dpll,
1296 	.get_dplls = hsw_get_dpll,
1297 	.put_dplls = intel_put_dpll,
1298 	.update_ref_clks = hsw_update_dpll_ref_clks,
1299 	.dump_hw_state = hsw_dump_hw_state,
1300 	.compare_hw_state = hsw_compare_hw_state,
1301 };
1302 
1303 struct skl_dpll_regs {
1304 	i915_reg_t ctl, cfgcr1, cfgcr2;
1305 };
1306 
1307 /* this array is indexed by the *shared* pll id */
1308 static const struct skl_dpll_regs skl_dpll_regs[4] = {
1309 	{
1310 		/* DPLL 0 */
1311 		.ctl = LCPLL1_CTL,
1312 		/* DPLL 0 doesn't support HDMI mode */
1313 	},
1314 	{
1315 		/* DPLL 1 */
1316 		.ctl = LCPLL2_CTL,
1317 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
1318 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
1319 	},
1320 	{
1321 		/* DPLL 2 */
1322 		.ctl = WRPLL_CTL(0),
1323 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
1324 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
1325 	},
1326 	{
1327 		/* DPLL 3 */
1328 		.ctl = WRPLL_CTL(1),
1329 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
1330 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
1331 	},
1332 };
1333 
1334 static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *i915,
1335 				    struct intel_shared_dpll *pll)
1336 {
1337 	const enum intel_dpll_id id = pll->info->id;
1338 
1339 	intel_de_rmw(i915, DPLL_CTRL1,
1340 		     DPLL_CTRL1_HDMI_MODE(id) | DPLL_CTRL1_SSC(id) | DPLL_CTRL1_LINK_RATE_MASK(id),
1341 		     pll->state.hw_state.ctrl1 << (id * 6));
1342 	intel_de_posting_read(i915, DPLL_CTRL1);
1343 }
1344 
1345 static void skl_ddi_pll_enable(struct drm_i915_private *i915,
1346 			       struct intel_shared_dpll *pll)
1347 {
1348 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1349 	const enum intel_dpll_id id = pll->info->id;
1350 
1351 	skl_ddi_pll_write_ctrl1(i915, pll);
1352 
1353 	intel_de_write(i915, regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
1354 	intel_de_write(i915, regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
1355 	intel_de_posting_read(i915, regs[id].cfgcr1);
1356 	intel_de_posting_read(i915, regs[id].cfgcr2);
1357 
1358 	/* the enable bit is always bit 31 */
1359 	intel_de_rmw(i915, regs[id].ctl, 0, LCPLL_PLL_ENABLE);
1360 
1361 	if (intel_de_wait_for_set(i915, DPLL_STATUS, DPLL_LOCK(id), 5))
1362 		drm_err(&i915->drm, "DPLL %d not locked\n", id);
1363 }
1364 
1365 static void skl_ddi_dpll0_enable(struct drm_i915_private *i915,
1366 				 struct intel_shared_dpll *pll)
1367 {
1368 	skl_ddi_pll_write_ctrl1(i915, pll);
1369 }
1370 
1371 static void skl_ddi_pll_disable(struct drm_i915_private *i915,
1372 				struct intel_shared_dpll *pll)
1373 {
1374 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1375 	const enum intel_dpll_id id = pll->info->id;
1376 
1377 	/* the enable bit is always bit 31 */
1378 	intel_de_rmw(i915, regs[id].ctl, LCPLL_PLL_ENABLE, 0);
1379 	intel_de_posting_read(i915, regs[id].ctl);
1380 }
1381 
1382 static void skl_ddi_dpll0_disable(struct drm_i915_private *i915,
1383 				  struct intel_shared_dpll *pll)
1384 {
1385 }
1386 
1387 static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *i915,
1388 				     struct intel_shared_dpll *pll,
1389 				     struct intel_dpll_hw_state *hw_state)
1390 {
1391 	u32 val;
1392 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1393 	const enum intel_dpll_id id = pll->info->id;
1394 	intel_wakeref_t wakeref;
1395 	bool ret;
1396 
1397 	wakeref = intel_display_power_get_if_enabled(i915,
1398 						     POWER_DOMAIN_DISPLAY_CORE);
1399 	if (!wakeref)
1400 		return false;
1401 
1402 	ret = false;
1403 
1404 	val = intel_de_read(i915, regs[id].ctl);
1405 	if (!(val & LCPLL_PLL_ENABLE))
1406 		goto out;
1407 
1408 	val = intel_de_read(i915, DPLL_CTRL1);
1409 	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1410 
1411 	/* avoid reading back stale values if HDMI mode is not enabled */
1412 	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
1413 		hw_state->cfgcr1 = intel_de_read(i915, regs[id].cfgcr1);
1414 		hw_state->cfgcr2 = intel_de_read(i915, regs[id].cfgcr2);
1415 	}
1416 	ret = true;
1417 
1418 out:
1419 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1420 
1421 	return ret;
1422 }
1423 
1424 static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *i915,
1425 				       struct intel_shared_dpll *pll,
1426 				       struct intel_dpll_hw_state *hw_state)
1427 {
1428 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1429 	const enum intel_dpll_id id = pll->info->id;
1430 	intel_wakeref_t wakeref;
1431 	u32 val;
1432 	bool ret;
1433 
1434 	wakeref = intel_display_power_get_if_enabled(i915,
1435 						     POWER_DOMAIN_DISPLAY_CORE);
1436 	if (!wakeref)
1437 		return false;
1438 
1439 	ret = false;
1440 
1441 	/* DPLL0 is always enabled since it drives CDCLK */
1442 	val = intel_de_read(i915, regs[id].ctl);
1443 	if (drm_WARN_ON(&i915->drm, !(val & LCPLL_PLL_ENABLE)))
1444 		goto out;
1445 
1446 	val = intel_de_read(i915, DPLL_CTRL1);
1447 	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1448 
1449 	ret = true;
1450 
1451 out:
1452 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1453 
1454 	return ret;
1455 }
1456 
1457 struct skl_wrpll_context {
1458 	u64 min_deviation;		/* current minimal deviation */
1459 	u64 central_freq;		/* chosen central freq */
1460 	u64 dco_freq;			/* chosen dco freq */
1461 	unsigned int p;			/* chosen divider */
1462 };
1463 
1464 /* DCO freq must be within +1%/-6%  of the DCO central freq */
1465 #define SKL_DCO_MAX_PDEVIATION	100
1466 #define SKL_DCO_MAX_NDEVIATION	600
1467 
1468 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1469 				  u64 central_freq,
1470 				  u64 dco_freq,
1471 				  unsigned int divider)
1472 {
1473 	u64 deviation;
1474 
1475 	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1476 			      central_freq);
1477 
1478 	/* positive deviation */
1479 	if (dco_freq >= central_freq) {
1480 		if (deviation < SKL_DCO_MAX_PDEVIATION &&
1481 		    deviation < ctx->min_deviation) {
1482 			ctx->min_deviation = deviation;
1483 			ctx->central_freq = central_freq;
1484 			ctx->dco_freq = dco_freq;
1485 			ctx->p = divider;
1486 		}
1487 	/* negative deviation */
1488 	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1489 		   deviation < ctx->min_deviation) {
1490 		ctx->min_deviation = deviation;
1491 		ctx->central_freq = central_freq;
1492 		ctx->dco_freq = dco_freq;
1493 		ctx->p = divider;
1494 	}
1495 }
1496 
1497 static void skl_wrpll_get_multipliers(unsigned int p,
1498 				      unsigned int *p0 /* out */,
1499 				      unsigned int *p1 /* out */,
1500 				      unsigned int *p2 /* out */)
1501 {
1502 	/* even dividers */
1503 	if (p % 2 == 0) {
1504 		unsigned int half = p / 2;
1505 
1506 		if (half == 1 || half == 2 || half == 3 || half == 5) {
1507 			*p0 = 2;
1508 			*p1 = 1;
1509 			*p2 = half;
1510 		} else if (half % 2 == 0) {
1511 			*p0 = 2;
1512 			*p1 = half / 2;
1513 			*p2 = 2;
1514 		} else if (half % 3 == 0) {
1515 			*p0 = 3;
1516 			*p1 = half / 3;
1517 			*p2 = 2;
1518 		} else if (half % 7 == 0) {
1519 			*p0 = 7;
1520 			*p1 = half / 7;
1521 			*p2 = 2;
1522 		}
1523 	} else if (p == 3 || p == 9) {  /* 3, 5, 7, 9, 15, 21, 35 */
1524 		*p0 = 3;
1525 		*p1 = 1;
1526 		*p2 = p / 3;
1527 	} else if (p == 5 || p == 7) {
1528 		*p0 = p;
1529 		*p1 = 1;
1530 		*p2 = 1;
1531 	} else if (p == 15) {
1532 		*p0 = 3;
1533 		*p1 = 1;
1534 		*p2 = 5;
1535 	} else if (p == 21) {
1536 		*p0 = 7;
1537 		*p1 = 1;
1538 		*p2 = 3;
1539 	} else if (p == 35) {
1540 		*p0 = 7;
1541 		*p1 = 1;
1542 		*p2 = 5;
1543 	}
1544 }
1545 
1546 struct skl_wrpll_params {
1547 	u32 dco_fraction;
1548 	u32 dco_integer;
1549 	u32 qdiv_ratio;
1550 	u32 qdiv_mode;
1551 	u32 kdiv;
1552 	u32 pdiv;
1553 	u32 central_freq;
1554 };
1555 
1556 static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
1557 				      u64 afe_clock,
1558 				      int ref_clock,
1559 				      u64 central_freq,
1560 				      u32 p0, u32 p1, u32 p2)
1561 {
1562 	u64 dco_freq;
1563 
1564 	switch (central_freq) {
1565 	case 9600000000ULL:
1566 		params->central_freq = 0;
1567 		break;
1568 	case 9000000000ULL:
1569 		params->central_freq = 1;
1570 		break;
1571 	case 8400000000ULL:
1572 		params->central_freq = 3;
1573 	}
1574 
1575 	switch (p0) {
1576 	case 1:
1577 		params->pdiv = 0;
1578 		break;
1579 	case 2:
1580 		params->pdiv = 1;
1581 		break;
1582 	case 3:
1583 		params->pdiv = 2;
1584 		break;
1585 	case 7:
1586 		params->pdiv = 4;
1587 		break;
1588 	default:
1589 		WARN(1, "Incorrect PDiv\n");
1590 	}
1591 
1592 	switch (p2) {
1593 	case 5:
1594 		params->kdiv = 0;
1595 		break;
1596 	case 2:
1597 		params->kdiv = 1;
1598 		break;
1599 	case 3:
1600 		params->kdiv = 2;
1601 		break;
1602 	case 1:
1603 		params->kdiv = 3;
1604 		break;
1605 	default:
1606 		WARN(1, "Incorrect KDiv\n");
1607 	}
1608 
1609 	params->qdiv_ratio = p1;
1610 	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
1611 
1612 	dco_freq = p0 * p1 * p2 * afe_clock;
1613 
1614 	/*
1615 	 * Intermediate values are in Hz.
1616 	 * Divide by MHz to match bsepc
1617 	 */
1618 	params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
1619 	params->dco_fraction =
1620 		div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
1621 			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
1622 }
1623 
1624 static int
1625 skl_ddi_calculate_wrpll(int clock /* in Hz */,
1626 			int ref_clock,
1627 			struct skl_wrpll_params *wrpll_params)
1628 {
1629 	static const u64 dco_central_freq[3] = { 8400000000ULL,
1630 						 9000000000ULL,
1631 						 9600000000ULL };
1632 	static const u8 even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
1633 					    24, 28, 30, 32, 36, 40, 42, 44,
1634 					    48, 52, 54, 56, 60, 64, 66, 68,
1635 					    70, 72, 76, 78, 80, 84, 88, 90,
1636 					    92, 96, 98 };
1637 	static const u8 odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
1638 	static const struct {
1639 		const u8 *list;
1640 		int n_dividers;
1641 	} dividers[] = {
1642 		{ even_dividers, ARRAY_SIZE(even_dividers) },
1643 		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
1644 	};
1645 	struct skl_wrpll_context ctx = {
1646 		.min_deviation = U64_MAX,
1647 	};
1648 	unsigned int dco, d, i;
1649 	unsigned int p0, p1, p2;
1650 	u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
1651 
1652 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
1653 		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
1654 			for (i = 0; i < dividers[d].n_dividers; i++) {
1655 				unsigned int p = dividers[d].list[i];
1656 				u64 dco_freq = p * afe_clock;
1657 
1658 				skl_wrpll_try_divider(&ctx,
1659 						      dco_central_freq[dco],
1660 						      dco_freq,
1661 						      p);
1662 				/*
1663 				 * Skip the remaining dividers if we're sure to
1664 				 * have found the definitive divider, we can't
1665 				 * improve a 0 deviation.
1666 				 */
1667 				if (ctx.min_deviation == 0)
1668 					goto skip_remaining_dividers;
1669 			}
1670 		}
1671 
1672 skip_remaining_dividers:
1673 		/*
1674 		 * If a solution is found with an even divider, prefer
1675 		 * this one.
1676 		 */
1677 		if (d == 0 && ctx.p)
1678 			break;
1679 	}
1680 
1681 	if (!ctx.p)
1682 		return -EINVAL;
1683 
1684 	/*
1685 	 * gcc incorrectly analyses that these can be used without being
1686 	 * initialized. To be fair, it's hard to guess.
1687 	 */
1688 	p0 = p1 = p2 = 0;
1689 	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
1690 	skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
1691 				  ctx.central_freq, p0, p1, p2);
1692 
1693 	return 0;
1694 }
1695 
1696 static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
1697 				  const struct intel_shared_dpll *pll,
1698 				  const struct intel_dpll_hw_state *pll_state)
1699 {
1700 	int ref_clock = i915->display.dpll.ref_clks.nssc;
1701 	u32 p0, p1, p2, dco_freq;
1702 
1703 	p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
1704 	p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;
1705 
1706 	if (pll_state->cfgcr2 &  DPLL_CFGCR2_QDIV_MODE(1))
1707 		p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
1708 	else
1709 		p1 = 1;
1710 
1711 
1712 	switch (p0) {
1713 	case DPLL_CFGCR2_PDIV_1:
1714 		p0 = 1;
1715 		break;
1716 	case DPLL_CFGCR2_PDIV_2:
1717 		p0 = 2;
1718 		break;
1719 	case DPLL_CFGCR2_PDIV_3:
1720 		p0 = 3;
1721 		break;
1722 	case DPLL_CFGCR2_PDIV_7_INVALID:
1723 		/*
1724 		 * Incorrect ASUS-Z170M BIOS setting, the HW seems to ignore bit#0,
1725 		 * handling it the same way as PDIV_7.
1726 		 */
1727 		drm_dbg_kms(&i915->drm, "Invalid WRPLL PDIV divider value, fixing it.\n");
1728 		fallthrough;
1729 	case DPLL_CFGCR2_PDIV_7:
1730 		p0 = 7;
1731 		break;
1732 	default:
1733 		MISSING_CASE(p0);
1734 		return 0;
1735 	}
1736 
1737 	switch (p2) {
1738 	case DPLL_CFGCR2_KDIV_5:
1739 		p2 = 5;
1740 		break;
1741 	case DPLL_CFGCR2_KDIV_2:
1742 		p2 = 2;
1743 		break;
1744 	case DPLL_CFGCR2_KDIV_3:
1745 		p2 = 3;
1746 		break;
1747 	case DPLL_CFGCR2_KDIV_1:
1748 		p2 = 1;
1749 		break;
1750 	default:
1751 		MISSING_CASE(p2);
1752 		return 0;
1753 	}
1754 
1755 	dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
1756 		   ref_clock;
1757 
1758 	dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
1759 		    ref_clock / 0x8000;
1760 
1761 	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
1762 		return 0;
1763 
1764 	return dco_freq / (p0 * p1 * p2 * 5);
1765 }
1766 
1767 static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
1768 {
1769 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1770 	struct skl_wrpll_params wrpll_params = {};
1771 	u32 ctrl1, cfgcr1, cfgcr2;
1772 	int ret;
1773 
1774 	/*
1775 	 * See comment in intel_dpll_hw_state to understand why we always use 0
1776 	 * as the DPLL id in this function.
1777 	 */
1778 	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1779 
1780 	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
1781 
1782 	ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
1783 				      i915->display.dpll.ref_clks.nssc, &wrpll_params);
1784 	if (ret)
1785 		return ret;
1786 
1787 	cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
1788 		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
1789 		wrpll_params.dco_integer;
1790 
1791 	cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
1792 		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
1793 		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
1794 		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
1795 		wrpll_params.central_freq;
1796 
1797 	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1798 	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
1799 	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
1800 
1801 	crtc_state->port_clock = skl_ddi_wrpll_get_freq(i915, NULL,
1802 							&crtc_state->dpll_hw_state);
1803 
1804 	return 0;
1805 }
1806 
1807 static int
1808 skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1809 {
1810 	u32 ctrl1;
1811 
1812 	/*
1813 	 * See comment in intel_dpll_hw_state to understand why we always use 0
1814 	 * as the DPLL id in this function.
1815 	 */
1816 	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1817 	switch (crtc_state->port_clock / 2) {
1818 	case 81000:
1819 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
1820 		break;
1821 	case 135000:
1822 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
1823 		break;
1824 	case 270000:
1825 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
1826 		break;
1827 		/* eDP 1.4 rates */
1828 	case 162000:
1829 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
1830 		break;
1831 	case 108000:
1832 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
1833 		break;
1834 	case 216000:
1835 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
1836 		break;
1837 	}
1838 
1839 	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1840 
1841 	return 0;
1842 }
1843 
1844 static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1845 				  const struct intel_shared_dpll *pll,
1846 				  const struct intel_dpll_hw_state *pll_state)
1847 {
1848 	int link_clock = 0;
1849 
1850 	switch ((pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
1851 		DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
1852 	case DPLL_CTRL1_LINK_RATE_810:
1853 		link_clock = 81000;
1854 		break;
1855 	case DPLL_CTRL1_LINK_RATE_1080:
1856 		link_clock = 108000;
1857 		break;
1858 	case DPLL_CTRL1_LINK_RATE_1350:
1859 		link_clock = 135000;
1860 		break;
1861 	case DPLL_CTRL1_LINK_RATE_1620:
1862 		link_clock = 162000;
1863 		break;
1864 	case DPLL_CTRL1_LINK_RATE_2160:
1865 		link_clock = 216000;
1866 		break;
1867 	case DPLL_CTRL1_LINK_RATE_2700:
1868 		link_clock = 270000;
1869 		break;
1870 	default:
1871 		drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
1872 		break;
1873 	}
1874 
1875 	return link_clock * 2;
1876 }
1877 
1878 static int skl_compute_dpll(struct intel_atomic_state *state,
1879 			    struct intel_crtc *crtc,
1880 			    struct intel_encoder *encoder)
1881 {
1882 	struct intel_crtc_state *crtc_state =
1883 		intel_atomic_get_new_crtc_state(state, crtc);
1884 
1885 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1886 		return skl_ddi_hdmi_pll_dividers(crtc_state);
1887 	else if (intel_crtc_has_dp_encoder(crtc_state))
1888 		return skl_ddi_dp_set_dpll_hw_state(crtc_state);
1889 	else
1890 		return -EINVAL;
1891 }
1892 
1893 static int skl_get_dpll(struct intel_atomic_state *state,
1894 			struct intel_crtc *crtc,
1895 			struct intel_encoder *encoder)
1896 {
1897 	struct intel_crtc_state *crtc_state =
1898 		intel_atomic_get_new_crtc_state(state, crtc);
1899 	struct intel_shared_dpll *pll;
1900 
1901 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1902 		pll = intel_find_shared_dpll(state, crtc,
1903 					     &crtc_state->dpll_hw_state,
1904 					     BIT(DPLL_ID_SKL_DPLL0));
1905 	else
1906 		pll = intel_find_shared_dpll(state, crtc,
1907 					     &crtc_state->dpll_hw_state,
1908 					     BIT(DPLL_ID_SKL_DPLL3) |
1909 					     BIT(DPLL_ID_SKL_DPLL2) |
1910 					     BIT(DPLL_ID_SKL_DPLL1));
1911 	if (!pll)
1912 		return -EINVAL;
1913 
1914 	intel_reference_shared_dpll(state, crtc,
1915 				    pll, &crtc_state->dpll_hw_state);
1916 
1917 	crtc_state->shared_dpll = pll;
1918 
1919 	return 0;
1920 }
1921 
1922 static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
1923 				const struct intel_shared_dpll *pll,
1924 				const struct intel_dpll_hw_state *pll_state)
1925 {
1926 	/*
1927 	 * ctrl1 register is already shifted for each pll, just use 0 to get
1928 	 * the internal shift for each field
1929 	 */
1930 	if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
1931 		return skl_ddi_wrpll_get_freq(i915, pll, pll_state);
1932 	else
1933 		return skl_ddi_lcpll_get_freq(i915, pll, pll_state);
1934 }
1935 
1936 static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
1937 {
1938 	/* No SSC ref */
1939 	i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
1940 }
1941 
1942 static void skl_dump_hw_state(struct drm_i915_private *i915,
1943 			      const struct intel_dpll_hw_state *hw_state)
1944 {
1945 	drm_dbg_kms(&i915->drm, "dpll_hw_state: "
1946 		      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
1947 		      hw_state->ctrl1,
1948 		      hw_state->cfgcr1,
1949 		      hw_state->cfgcr2);
1950 }
1951 
1952 static bool skl_compare_hw_state(const struct intel_dpll_hw_state *a,
1953 				 const struct intel_dpll_hw_state *b)
1954 {
1955 	return a->ctrl1 == b->ctrl1 &&
1956 		a->cfgcr1 == b->cfgcr1 &&
1957 		a->cfgcr2 == b->cfgcr2;
1958 }
1959 
1960 static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
1961 	.enable = skl_ddi_pll_enable,
1962 	.disable = skl_ddi_pll_disable,
1963 	.get_hw_state = skl_ddi_pll_get_hw_state,
1964 	.get_freq = skl_ddi_pll_get_freq,
1965 };
1966 
1967 static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
1968 	.enable = skl_ddi_dpll0_enable,
1969 	.disable = skl_ddi_dpll0_disable,
1970 	.get_hw_state = skl_ddi_dpll0_get_hw_state,
1971 	.get_freq = skl_ddi_pll_get_freq,
1972 };
1973 
1974 static const struct dpll_info skl_plls[] = {
1975 	{ .name = "DPLL 0", .funcs = &skl_ddi_dpll0_funcs, .id = DPLL_ID_SKL_DPLL0,
1976 	  .always_on = true, },
1977 	{ .name = "DPLL 1", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL1, },
1978 	{ .name = "DPLL 2", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL2, },
1979 	{ .name = "DPLL 3", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL3, },
1980 	{}
1981 };
1982 
1983 static const struct intel_dpll_mgr skl_pll_mgr = {
1984 	.dpll_info = skl_plls,
1985 	.compute_dplls = skl_compute_dpll,
1986 	.get_dplls = skl_get_dpll,
1987 	.put_dplls = intel_put_dpll,
1988 	.update_ref_clks = skl_update_dpll_ref_clks,
1989 	.dump_hw_state = skl_dump_hw_state,
1990 	.compare_hw_state = skl_compare_hw_state,
1991 };
1992 
1993 static void bxt_ddi_pll_enable(struct drm_i915_private *i915,
1994 			       struct intel_shared_dpll *pll)
1995 {
1996 	u32 temp;
1997 	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
1998 	enum dpio_phy phy;
1999 	enum dpio_channel ch;
2000 
2001 	bxt_port_to_phy_channel(i915, port, &phy, &ch);
2002 
2003 	/* Non-SSC reference */
2004 	intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_REF_SEL);
2005 
2006 	if (IS_GEMINILAKE(i915)) {
2007 		intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port),
2008 			     0, PORT_PLL_POWER_ENABLE);
2009 
2010 		if (wait_for_us((intel_de_read(i915, BXT_PORT_PLL_ENABLE(port)) &
2011 				 PORT_PLL_POWER_STATE), 200))
2012 			drm_err(&i915->drm,
2013 				"Power state not set for PLL:%d\n", port);
2014 	}
2015 
2016 	/* Disable 10 bit clock */
2017 	intel_de_rmw(i915, BXT_PORT_PLL_EBB_4(phy, ch),
2018 		     PORT_PLL_10BIT_CLK_ENABLE, 0);
2019 
2020 	/* Write P1 & P2 */
2021 	intel_de_rmw(i915, BXT_PORT_PLL_EBB_0(phy, ch),
2022 		     PORT_PLL_P1_MASK | PORT_PLL_P2_MASK, pll->state.hw_state.ebb0);
2023 
2024 	/* Write M2 integer */
2025 	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 0),
2026 		     PORT_PLL_M2_INT_MASK, pll->state.hw_state.pll0);
2027 
2028 	/* Write N */
2029 	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 1),
2030 		     PORT_PLL_N_MASK, pll->state.hw_state.pll1);
2031 
2032 	/* Write M2 fraction */
2033 	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 2),
2034 		     PORT_PLL_M2_FRAC_MASK, pll->state.hw_state.pll2);
2035 
2036 	/* Write M2 fraction enable */
2037 	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 3),
2038 		     PORT_PLL_M2_FRAC_ENABLE, pll->state.hw_state.pll3);
2039 
2040 	/* Write coeff */
2041 	temp = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 6));
2042 	temp &= ~PORT_PLL_PROP_COEFF_MASK;
2043 	temp &= ~PORT_PLL_INT_COEFF_MASK;
2044 	temp &= ~PORT_PLL_GAIN_CTL_MASK;
2045 	temp |= pll->state.hw_state.pll6;
2046 	intel_de_write(i915, BXT_PORT_PLL(phy, ch, 6), temp);
2047 
2048 	/* Write calibration val */
2049 	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 8),
2050 		     PORT_PLL_TARGET_CNT_MASK, pll->state.hw_state.pll8);
2051 
2052 	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 9),
2053 		     PORT_PLL_LOCK_THRESHOLD_MASK, pll->state.hw_state.pll9);
2054 
2055 	temp = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 10));
2056 	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
2057 	temp &= ~PORT_PLL_DCO_AMP_MASK;
2058 	temp |= pll->state.hw_state.pll10;
2059 	intel_de_write(i915, BXT_PORT_PLL(phy, ch, 10), temp);
2060 
2061 	/* Recalibrate with new settings */
2062 	temp = intel_de_read(i915, BXT_PORT_PLL_EBB_4(phy, ch));
2063 	temp |= PORT_PLL_RECALIBRATE;
2064 	intel_de_write(i915, BXT_PORT_PLL_EBB_4(phy, ch), temp);
2065 	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
2066 	temp |= pll->state.hw_state.ebb4;
2067 	intel_de_write(i915, BXT_PORT_PLL_EBB_4(phy, ch), temp);
2068 
2069 	/* Enable PLL */
2070 	intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_ENABLE);
2071 	intel_de_posting_read(i915, BXT_PORT_PLL_ENABLE(port));
2072 
2073 	if (wait_for_us((intel_de_read(i915, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
2074 			200))
2075 		drm_err(&i915->drm, "PLL %d not locked\n", port);
2076 
2077 	if (IS_GEMINILAKE(i915)) {
2078 		temp = intel_de_read(i915, BXT_PORT_TX_DW5_LN0(phy, ch));
2079 		temp |= DCC_DELAY_RANGE_2;
2080 		intel_de_write(i915, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
2081 	}
2082 
2083 	/*
2084 	 * While we write to the group register to program all lanes at once we
2085 	 * can read only lane registers and we pick lanes 0/1 for that.
2086 	 */
2087 	temp = intel_de_read(i915, BXT_PORT_PCS_DW12_LN01(phy, ch));
2088 	temp &= ~LANE_STAGGER_MASK;
2089 	temp &= ~LANESTAGGER_STRAP_OVRD;
2090 	temp |= pll->state.hw_state.pcsdw12;
2091 	intel_de_write(i915, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
2092 }
2093 
2094 static void bxt_ddi_pll_disable(struct drm_i915_private *i915,
2095 				struct intel_shared_dpll *pll)
2096 {
2097 	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
2098 
2099 	intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), PORT_PLL_ENABLE, 0);
2100 	intel_de_posting_read(i915, BXT_PORT_PLL_ENABLE(port));
2101 
2102 	if (IS_GEMINILAKE(i915)) {
2103 		intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port),
2104 			     PORT_PLL_POWER_ENABLE, 0);
2105 
2106 		if (wait_for_us(!(intel_de_read(i915, BXT_PORT_PLL_ENABLE(port)) &
2107 				  PORT_PLL_POWER_STATE), 200))
2108 			drm_err(&i915->drm,
2109 				"Power state not reset for PLL:%d\n", port);
2110 	}
2111 }
2112 
2113 static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *i915,
2114 				     struct intel_shared_dpll *pll,
2115 				     struct intel_dpll_hw_state *hw_state)
2116 {
2117 	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
2118 	intel_wakeref_t wakeref;
2119 	enum dpio_phy phy;
2120 	enum dpio_channel ch;
2121 	u32 val;
2122 	bool ret;
2123 
2124 	bxt_port_to_phy_channel(i915, port, &phy, &ch);
2125 
2126 	wakeref = intel_display_power_get_if_enabled(i915,
2127 						     POWER_DOMAIN_DISPLAY_CORE);
2128 	if (!wakeref)
2129 		return false;
2130 
2131 	ret = false;
2132 
2133 	val = intel_de_read(i915, BXT_PORT_PLL_ENABLE(port));
2134 	if (!(val & PORT_PLL_ENABLE))
2135 		goto out;
2136 
2137 	hw_state->ebb0 = intel_de_read(i915, BXT_PORT_PLL_EBB_0(phy, ch));
2138 	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
2139 
2140 	hw_state->ebb4 = intel_de_read(i915, BXT_PORT_PLL_EBB_4(phy, ch));
2141 	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
2142 
2143 	hw_state->pll0 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 0));
2144 	hw_state->pll0 &= PORT_PLL_M2_INT_MASK;
2145 
2146 	hw_state->pll1 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 1));
2147 	hw_state->pll1 &= PORT_PLL_N_MASK;
2148 
2149 	hw_state->pll2 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 2));
2150 	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
2151 
2152 	hw_state->pll3 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 3));
2153 	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
2154 
2155 	hw_state->pll6 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 6));
2156 	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
2157 			  PORT_PLL_INT_COEFF_MASK |
2158 			  PORT_PLL_GAIN_CTL_MASK;
2159 
2160 	hw_state->pll8 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 8));
2161 	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
2162 
2163 	hw_state->pll9 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 9));
2164 	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
2165 
2166 	hw_state->pll10 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 10));
2167 	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
2168 			   PORT_PLL_DCO_AMP_MASK;
2169 
2170 	/*
2171 	 * While we write to the group register to program all lanes at once we
2172 	 * can read only lane registers. We configure all lanes the same way, so
2173 	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
2174 	 */
2175 	hw_state->pcsdw12 = intel_de_read(i915,
2176 					  BXT_PORT_PCS_DW12_LN01(phy, ch));
2177 	if (intel_de_read(i915, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
2178 		drm_dbg(&i915->drm,
2179 			"lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
2180 			hw_state->pcsdw12,
2181 			intel_de_read(i915,
2182 				      BXT_PORT_PCS_DW12_LN23(phy, ch)));
2183 	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
2184 
2185 	ret = true;
2186 
2187 out:
2188 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
2189 
2190 	return ret;
2191 }
2192 
2193 /* pre-calculated values for DP linkrates */
2194 static const struct dpll bxt_dp_clk_val[] = {
2195 	/* m2 is .22 binary fixed point */
2196 	{ .dot = 162000, .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2197 	{ .dot = 270000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
2198 	{ .dot = 540000, .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
2199 	{ .dot = 216000, .p1 = 3, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2200 	{ .dot = 243000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6133333 /* 24.3 */ },
2201 	{ .dot = 324000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2202 	{ .dot = 432000, .p1 = 3, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2203 };
2204 
2205 static int
2206 bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
2207 			  struct dpll *clk_div)
2208 {
2209 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2210 
2211 	/* Calculate HDMI div */
2212 	/*
2213 	 * FIXME: tie the following calculation into
2214 	 * i9xx_crtc_compute_clock
2215 	 */
2216 	if (!bxt_find_best_dpll(crtc_state, clk_div))
2217 		return -EINVAL;
2218 
2219 	drm_WARN_ON(&i915->drm, clk_div->m1 != 2);
2220 
2221 	return 0;
2222 }
2223 
2224 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
2225 				    struct dpll *clk_div)
2226 {
2227 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2228 	int i;
2229 
2230 	*clk_div = bxt_dp_clk_val[0];
2231 	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
2232 		if (crtc_state->port_clock == bxt_dp_clk_val[i].dot) {
2233 			*clk_div = bxt_dp_clk_val[i];
2234 			break;
2235 		}
2236 	}
2237 
2238 	chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, clk_div);
2239 
2240 	drm_WARN_ON(&i915->drm, clk_div->vco == 0 ||
2241 		    clk_div->dot != crtc_state->port_clock);
2242 }
2243 
2244 static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
2245 				     const struct dpll *clk_div)
2246 {
2247 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2248 	struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
2249 	int clock = crtc_state->port_clock;
2250 	int vco = clk_div->vco;
2251 	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
2252 	u32 lanestagger;
2253 
2254 	if (vco >= 6200000 && vco <= 6700000) {
2255 		prop_coef = 4;
2256 		int_coef = 9;
2257 		gain_ctl = 3;
2258 		targ_cnt = 8;
2259 	} else if ((vco > 5400000 && vco < 6200000) ||
2260 			(vco >= 4800000 && vco < 5400000)) {
2261 		prop_coef = 5;
2262 		int_coef = 11;
2263 		gain_ctl = 3;
2264 		targ_cnt = 9;
2265 	} else if (vco == 5400000) {
2266 		prop_coef = 3;
2267 		int_coef = 8;
2268 		gain_ctl = 1;
2269 		targ_cnt = 9;
2270 	} else {
2271 		drm_err(&i915->drm, "Invalid VCO\n");
2272 		return -EINVAL;
2273 	}
2274 
2275 	if (clock > 270000)
2276 		lanestagger = 0x18;
2277 	else if (clock > 135000)
2278 		lanestagger = 0x0d;
2279 	else if (clock > 67000)
2280 		lanestagger = 0x07;
2281 	else if (clock > 33000)
2282 		lanestagger = 0x04;
2283 	else
2284 		lanestagger = 0x02;
2285 
2286 	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
2287 	dpll_hw_state->pll0 = PORT_PLL_M2_INT(clk_div->m2 >> 22);
2288 	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
2289 	dpll_hw_state->pll2 = PORT_PLL_M2_FRAC(clk_div->m2 & 0x3fffff);
2290 
2291 	if (clk_div->m2 & 0x3fffff)
2292 		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
2293 
2294 	dpll_hw_state->pll6 = PORT_PLL_PROP_COEFF(prop_coef) |
2295 		PORT_PLL_INT_COEFF(int_coef) |
2296 		PORT_PLL_GAIN_CTL(gain_ctl);
2297 
2298 	dpll_hw_state->pll8 = PORT_PLL_TARGET_CNT(targ_cnt);
2299 
2300 	dpll_hw_state->pll9 = PORT_PLL_LOCK_THRESHOLD(5);
2301 
2302 	dpll_hw_state->pll10 = PORT_PLL_DCO_AMP(15) |
2303 		PORT_PLL_DCO_AMP_OVR_EN_H;
2304 
2305 	dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
2306 
2307 	dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
2308 
2309 	return 0;
2310 }
2311 
2312 static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
2313 				const struct intel_shared_dpll *pll,
2314 				const struct intel_dpll_hw_state *pll_state)
2315 {
2316 	struct dpll clock;
2317 
2318 	clock.m1 = 2;
2319 	clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, pll_state->pll0) << 22;
2320 	if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
2321 		clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK, pll_state->pll2);
2322 	clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, pll_state->pll1);
2323 	clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, pll_state->ebb0);
2324 	clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, pll_state->ebb0);
2325 
2326 	return chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, &clock);
2327 }
2328 
2329 static int
2330 bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2331 {
2332 	struct dpll clk_div = {};
2333 
2334 	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
2335 
2336 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2337 }
2338 
2339 static int
2340 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2341 {
2342 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2343 	struct dpll clk_div = {};
2344 	int ret;
2345 
2346 	bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
2347 
2348 	ret = bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2349 	if (ret)
2350 		return ret;
2351 
2352 	crtc_state->port_clock = bxt_ddi_pll_get_freq(i915, NULL,
2353 						      &crtc_state->dpll_hw_state);
2354 
2355 	return 0;
2356 }
2357 
2358 static int bxt_compute_dpll(struct intel_atomic_state *state,
2359 			    struct intel_crtc *crtc,
2360 			    struct intel_encoder *encoder)
2361 {
2362 	struct intel_crtc_state *crtc_state =
2363 		intel_atomic_get_new_crtc_state(state, crtc);
2364 
2365 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
2366 		return bxt_ddi_hdmi_set_dpll_hw_state(crtc_state);
2367 	else if (intel_crtc_has_dp_encoder(crtc_state))
2368 		return bxt_ddi_dp_set_dpll_hw_state(crtc_state);
2369 	else
2370 		return -EINVAL;
2371 }
2372 
2373 static int bxt_get_dpll(struct intel_atomic_state *state,
2374 			struct intel_crtc *crtc,
2375 			struct intel_encoder *encoder)
2376 {
2377 	struct intel_crtc_state *crtc_state =
2378 		intel_atomic_get_new_crtc_state(state, crtc);
2379 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2380 	struct intel_shared_dpll *pll;
2381 	enum intel_dpll_id id;
2382 
2383 	/* 1:1 mapping between ports and PLLs */
2384 	id = (enum intel_dpll_id) encoder->port;
2385 	pll = intel_get_shared_dpll_by_id(i915, id);
2386 
2387 	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
2388 		    crtc->base.base.id, crtc->base.name, pll->info->name);
2389 
2390 	intel_reference_shared_dpll(state, crtc,
2391 				    pll, &crtc_state->dpll_hw_state);
2392 
2393 	crtc_state->shared_dpll = pll;
2394 
2395 	return 0;
2396 }
2397 
2398 static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
2399 {
2400 	i915->display.dpll.ref_clks.ssc = 100000;
2401 	i915->display.dpll.ref_clks.nssc = 100000;
2402 	/* DSI non-SSC ref 19.2MHz */
2403 }
2404 
2405 static void bxt_dump_hw_state(struct drm_i915_private *i915,
2406 			      const struct intel_dpll_hw_state *hw_state)
2407 {
2408 	drm_dbg_kms(&i915->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
2409 		    "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
2410 		    "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
2411 		    hw_state->ebb0,
2412 		    hw_state->ebb4,
2413 		    hw_state->pll0,
2414 		    hw_state->pll1,
2415 		    hw_state->pll2,
2416 		    hw_state->pll3,
2417 		    hw_state->pll6,
2418 		    hw_state->pll8,
2419 		    hw_state->pll9,
2420 		    hw_state->pll10,
2421 		    hw_state->pcsdw12);
2422 }
2423 
2424 static bool bxt_compare_hw_state(const struct intel_dpll_hw_state *a,
2425 				 const struct intel_dpll_hw_state *b)
2426 {
2427 	return a->ebb0 == b->ebb0 &&
2428 		a->ebb4 == b->ebb4 &&
2429 		a->pll0 == b->pll0 &&
2430 		a->pll1 == b->pll1 &&
2431 		a->pll2 == b->pll2 &&
2432 		a->pll3 == b->pll3 &&
2433 		a->pll6 == b->pll6 &&
2434 		a->pll8 == b->pll8 &&
2435 		a->pll10 == b->pll10 &&
2436 		a->pcsdw12 == b->pcsdw12;
2437 }
2438 
2439 static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
2440 	.enable = bxt_ddi_pll_enable,
2441 	.disable = bxt_ddi_pll_disable,
2442 	.get_hw_state = bxt_ddi_pll_get_hw_state,
2443 	.get_freq = bxt_ddi_pll_get_freq,
2444 };
2445 
2446 static const struct dpll_info bxt_plls[] = {
2447 	{ .name = "PORT PLL A", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL0, },
2448 	{ .name = "PORT PLL B", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL1, },
2449 	{ .name = "PORT PLL C", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL2, },
2450 	{}
2451 };
2452 
2453 static const struct intel_dpll_mgr bxt_pll_mgr = {
2454 	.dpll_info = bxt_plls,
2455 	.compute_dplls = bxt_compute_dpll,
2456 	.get_dplls = bxt_get_dpll,
2457 	.put_dplls = intel_put_dpll,
2458 	.update_ref_clks = bxt_update_dpll_ref_clks,
2459 	.dump_hw_state = bxt_dump_hw_state,
2460 	.compare_hw_state = bxt_compare_hw_state,
2461 };
2462 
2463 static void icl_wrpll_get_multipliers(int bestdiv, int *pdiv,
2464 				      int *qdiv, int *kdiv)
2465 {
2466 	/* even dividers */
2467 	if (bestdiv % 2 == 0) {
2468 		if (bestdiv == 2) {
2469 			*pdiv = 2;
2470 			*qdiv = 1;
2471 			*kdiv = 1;
2472 		} else if (bestdiv % 4 == 0) {
2473 			*pdiv = 2;
2474 			*qdiv = bestdiv / 4;
2475 			*kdiv = 2;
2476 		} else if (bestdiv % 6 == 0) {
2477 			*pdiv = 3;
2478 			*qdiv = bestdiv / 6;
2479 			*kdiv = 2;
2480 		} else if (bestdiv % 5 == 0) {
2481 			*pdiv = 5;
2482 			*qdiv = bestdiv / 10;
2483 			*kdiv = 2;
2484 		} else if (bestdiv % 14 == 0) {
2485 			*pdiv = 7;
2486 			*qdiv = bestdiv / 14;
2487 			*kdiv = 2;
2488 		}
2489 	} else {
2490 		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
2491 			*pdiv = bestdiv;
2492 			*qdiv = 1;
2493 			*kdiv = 1;
2494 		} else { /* 9, 15, 21 */
2495 			*pdiv = bestdiv / 3;
2496 			*qdiv = 1;
2497 			*kdiv = 3;
2498 		}
2499 	}
2500 }
2501 
2502 static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
2503 				      u32 dco_freq, u32 ref_freq,
2504 				      int pdiv, int qdiv, int kdiv)
2505 {
2506 	u32 dco;
2507 
2508 	switch (kdiv) {
2509 	case 1:
2510 		params->kdiv = 1;
2511 		break;
2512 	case 2:
2513 		params->kdiv = 2;
2514 		break;
2515 	case 3:
2516 		params->kdiv = 4;
2517 		break;
2518 	default:
2519 		WARN(1, "Incorrect KDiv\n");
2520 	}
2521 
2522 	switch (pdiv) {
2523 	case 2:
2524 		params->pdiv = 1;
2525 		break;
2526 	case 3:
2527 		params->pdiv = 2;
2528 		break;
2529 	case 5:
2530 		params->pdiv = 4;
2531 		break;
2532 	case 7:
2533 		params->pdiv = 8;
2534 		break;
2535 	default:
2536 		WARN(1, "Incorrect PDiv\n");
2537 	}
2538 
2539 	WARN_ON(kdiv != 2 && qdiv != 1);
2540 
2541 	params->qdiv_ratio = qdiv;
2542 	params->qdiv_mode = (qdiv == 1) ? 0 : 1;
2543 
2544 	dco = div_u64((u64)dco_freq << 15, ref_freq);
2545 
2546 	params->dco_integer = dco >> 15;
2547 	params->dco_fraction = dco & 0x7fff;
2548 }
2549 
2550 /*
2551  * Display WA #22010492432: ehl, tgl, adl-s, adl-p
2552  * Program half of the nominal DCO divider fraction value.
2553  */
2554 static bool
2555 ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
2556 {
2557 	return (((IS_ELKHARTLAKE(i915) || IS_JASPERLAKE(i915)) &&
2558 		 IS_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
2559 		 IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) &&
2560 		 i915->display.dpll.ref_clks.nssc == 38400;
2561 }
2562 
2563 struct icl_combo_pll_params {
2564 	int clock;
2565 	struct skl_wrpll_params wrpll;
2566 };
2567 
2568 /*
2569  * These values alrea already adjusted: they're the bits we write to the
2570  * registers, not the logical values.
2571  */
2572 static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
2573 	{ 540000,
2574 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
2575 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2576 	{ 270000,
2577 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
2578 	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2579 	{ 162000,
2580 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
2581 	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2582 	{ 324000,
2583 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
2584 	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2585 	{ 216000,
2586 	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2587 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2588 	{ 432000,
2589 	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2590 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2591 	{ 648000,
2592 	  { .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
2593 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2594 	{ 810000,
2595 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
2596 	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2597 };
2598 
2599 
2600 /* Also used for 38.4 MHz values. */
2601 static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
2602 	{ 540000,
2603 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
2604 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2605 	{ 270000,
2606 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
2607 	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2608 	{ 162000,
2609 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
2610 	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2611 	{ 324000,
2612 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
2613 	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2614 	{ 216000,
2615 	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2616 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2617 	{ 432000,
2618 	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2619 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2620 	{ 648000,
2621 	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
2622 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2623 	{ 810000,
2624 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
2625 	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2626 };
2627 
2628 static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
2629 	.dco_integer = 0x151, .dco_fraction = 0x4000,
2630 	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2631 };
2632 
2633 static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
2634 	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
2635 	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2636 };
2637 
2638 static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
2639 	.dco_integer = 0x54, .dco_fraction = 0x3000,
2640 	/* the following params are unused */
2641 	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
2642 };
2643 
2644 static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
2645 	.dco_integer = 0x43, .dco_fraction = 0x4000,
2646 	/* the following params are unused */
2647 };
2648 
2649 static int icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2650 				 struct skl_wrpll_params *pll_params)
2651 {
2652 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2653 	const struct icl_combo_pll_params *params =
2654 		i915->display.dpll.ref_clks.nssc == 24000 ?
2655 		icl_dp_combo_pll_24MHz_values :
2656 		icl_dp_combo_pll_19_2MHz_values;
2657 	int clock = crtc_state->port_clock;
2658 	int i;
2659 
2660 	for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
2661 		if (clock == params[i].clock) {
2662 			*pll_params = params[i].wrpll;
2663 			return 0;
2664 		}
2665 	}
2666 
2667 	MISSING_CASE(clock);
2668 	return -EINVAL;
2669 }
2670 
2671 static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
2672 			    struct skl_wrpll_params *pll_params)
2673 {
2674 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2675 
2676 	if (DISPLAY_VER(i915) >= 12) {
2677 		switch (i915->display.dpll.ref_clks.nssc) {
2678 		default:
2679 			MISSING_CASE(i915->display.dpll.ref_clks.nssc);
2680 			fallthrough;
2681 		case 19200:
2682 		case 38400:
2683 			*pll_params = tgl_tbt_pll_19_2MHz_values;
2684 			break;
2685 		case 24000:
2686 			*pll_params = tgl_tbt_pll_24MHz_values;
2687 			break;
2688 		}
2689 	} else {
2690 		switch (i915->display.dpll.ref_clks.nssc) {
2691 		default:
2692 			MISSING_CASE(i915->display.dpll.ref_clks.nssc);
2693 			fallthrough;
2694 		case 19200:
2695 		case 38400:
2696 			*pll_params = icl_tbt_pll_19_2MHz_values;
2697 			break;
2698 		case 24000:
2699 			*pll_params = icl_tbt_pll_24MHz_values;
2700 			break;
2701 		}
2702 	}
2703 
2704 	return 0;
2705 }
2706 
2707 static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
2708 				    const struct intel_shared_dpll *pll,
2709 				    const struct intel_dpll_hw_state *pll_state)
2710 {
2711 	/*
2712 	 * The PLL outputs multiple frequencies at the same time, selection is
2713 	 * made at DDI clock mux level.
2714 	 */
2715 	drm_WARN_ON(&i915->drm, 1);
2716 
2717 	return 0;
2718 }
2719 
2720 static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
2721 {
2722 	int ref_clock = i915->display.dpll.ref_clks.nssc;
2723 
2724 	/*
2725 	 * For ICL+, the spec states: if reference frequency is 38.4,
2726 	 * use 19.2 because the DPLL automatically divides that by 2.
2727 	 */
2728 	if (ref_clock == 38400)
2729 		ref_clock = 19200;
2730 
2731 	return ref_clock;
2732 }
2733 
2734 static int
2735 icl_calc_wrpll(struct intel_crtc_state *crtc_state,
2736 	       struct skl_wrpll_params *wrpll_params)
2737 {
2738 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2739 	int ref_clock = icl_wrpll_ref_clock(i915);
2740 	u32 afe_clock = crtc_state->port_clock * 5;
2741 	u32 dco_min = 7998000;
2742 	u32 dco_max = 10000000;
2743 	u32 dco_mid = (dco_min + dco_max) / 2;
2744 	static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
2745 					 18, 20, 24, 28, 30, 32,  36,  40,
2746 					 42, 44, 48, 50, 52, 54,  56,  60,
2747 					 64, 66, 68, 70, 72, 76,  78,  80,
2748 					 84, 88, 90, 92, 96, 98, 100, 102,
2749 					  3,  5,  7,  9, 15, 21 };
2750 	u32 dco, best_dco = 0, dco_centrality = 0;
2751 	u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2752 	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2753 
2754 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2755 		dco = afe_clock * dividers[d];
2756 
2757 		if (dco <= dco_max && dco >= dco_min) {
2758 			dco_centrality = abs(dco - dco_mid);
2759 
2760 			if (dco_centrality < best_dco_centrality) {
2761 				best_dco_centrality = dco_centrality;
2762 				best_div = dividers[d];
2763 				best_dco = dco;
2764 			}
2765 		}
2766 	}
2767 
2768 	if (best_div == 0)
2769 		return -EINVAL;
2770 
2771 	icl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2772 	icl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
2773 				  pdiv, qdiv, kdiv);
2774 
2775 	return 0;
2776 }
2777 
2778 static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
2779 				      const struct intel_shared_dpll *pll,
2780 				      const struct intel_dpll_hw_state *pll_state)
2781 {
2782 	int ref_clock = icl_wrpll_ref_clock(i915);
2783 	u32 dco_fraction;
2784 	u32 p0, p1, p2, dco_freq;
2785 
2786 	p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
2787 	p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
2788 
2789 	if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
2790 		p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
2791 			DPLL_CFGCR1_QDIV_RATIO_SHIFT;
2792 	else
2793 		p1 = 1;
2794 
2795 	switch (p0) {
2796 	case DPLL_CFGCR1_PDIV_2:
2797 		p0 = 2;
2798 		break;
2799 	case DPLL_CFGCR1_PDIV_3:
2800 		p0 = 3;
2801 		break;
2802 	case DPLL_CFGCR1_PDIV_5:
2803 		p0 = 5;
2804 		break;
2805 	case DPLL_CFGCR1_PDIV_7:
2806 		p0 = 7;
2807 		break;
2808 	}
2809 
2810 	switch (p2) {
2811 	case DPLL_CFGCR1_KDIV_1:
2812 		p2 = 1;
2813 		break;
2814 	case DPLL_CFGCR1_KDIV_2:
2815 		p2 = 2;
2816 		break;
2817 	case DPLL_CFGCR1_KDIV_3:
2818 		p2 = 3;
2819 		break;
2820 	}
2821 
2822 	dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
2823 		   ref_clock;
2824 
2825 	dco_fraction = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
2826 		       DPLL_CFGCR0_DCO_FRACTION_SHIFT;
2827 
2828 	if (ehl_combo_pll_div_frac_wa_needed(i915))
2829 		dco_fraction *= 2;
2830 
2831 	dco_freq += (dco_fraction * ref_clock) / 0x8000;
2832 
2833 	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
2834 		return 0;
2835 
2836 	return dco_freq / (p0 * p1 * p2 * 5);
2837 }
2838 
2839 static void icl_calc_dpll_state(struct drm_i915_private *i915,
2840 				const struct skl_wrpll_params *pll_params,
2841 				struct intel_dpll_hw_state *pll_state)
2842 {
2843 	u32 dco_fraction = pll_params->dco_fraction;
2844 
2845 	if (ehl_combo_pll_div_frac_wa_needed(i915))
2846 		dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);
2847 
2848 	pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
2849 			    pll_params->dco_integer;
2850 
2851 	pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
2852 			    DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
2853 			    DPLL_CFGCR1_KDIV(pll_params->kdiv) |
2854 			    DPLL_CFGCR1_PDIV(pll_params->pdiv);
2855 
2856 	if (DISPLAY_VER(i915) >= 12)
2857 		pll_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
2858 	else
2859 		pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
2860 
2861 	if (i915->display.vbt.override_afc_startup)
2862 		pll_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->display.vbt.override_afc_startup_val);
2863 }
2864 
2865 static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
2866 				    u32 *target_dco_khz,
2867 				    struct intel_dpll_hw_state *state,
2868 				    bool is_dkl)
2869 {
2870 	static const u8 div1_vals[] = { 7, 5, 3, 2 };
2871 	u32 dco_min_freq, dco_max_freq;
2872 	unsigned int i;
2873 	int div2;
2874 
2875 	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
2876 	dco_max_freq = is_dp ? 8100000 : 10000000;
2877 
2878 	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
2879 		int div1 = div1_vals[i];
2880 
2881 		for (div2 = 10; div2 > 0; div2--) {
2882 			int dco = div1 * div2 * clock_khz * 5;
2883 			int a_divratio, tlinedrv, inputsel;
2884 			u32 hsdiv;
2885 
2886 			if (dco < dco_min_freq || dco > dco_max_freq)
2887 				continue;
2888 
2889 			if (div2 >= 2) {
2890 				/*
2891 				 * Note: a_divratio not matching TGL BSpec
2892 				 * algorithm but matching hardcoded values and
2893 				 * working on HW for DP alt-mode at least
2894 				 */
2895 				a_divratio = is_dp ? 10 : 5;
2896 				tlinedrv = is_dkl ? 1 : 2;
2897 			} else {
2898 				a_divratio = 5;
2899 				tlinedrv = 0;
2900 			}
2901 			inputsel = is_dp ? 0 : 1;
2902 
2903 			switch (div1) {
2904 			default:
2905 				MISSING_CASE(div1);
2906 				fallthrough;
2907 			case 2:
2908 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
2909 				break;
2910 			case 3:
2911 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
2912 				break;
2913 			case 5:
2914 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
2915 				break;
2916 			case 7:
2917 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
2918 				break;
2919 			}
2920 
2921 			*target_dco_khz = dco;
2922 
2923 			state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
2924 
2925 			state->mg_clktop2_coreclkctl1 =
2926 				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
2927 
2928 			state->mg_clktop2_hsclkctl =
2929 				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
2930 				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
2931 				hsdiv |
2932 				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
2933 
2934 			return 0;
2935 		}
2936 	}
2937 
2938 	return -EINVAL;
2939 }
2940 
2941 /*
2942  * The specification for this function uses real numbers, so the math had to be
2943  * adapted to integer-only calculation, that's why it looks so different.
2944  */
2945 static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
2946 				 struct intel_dpll_hw_state *pll_state)
2947 {
2948 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2949 	int refclk_khz = i915->display.dpll.ref_clks.nssc;
2950 	int clock = crtc_state->port_clock;
2951 	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
2952 	u32 iref_ndiv, iref_trim, iref_pulse_w;
2953 	u32 prop_coeff, int_coeff;
2954 	u32 tdc_targetcnt, feedfwgain;
2955 	u64 ssc_stepsize, ssc_steplen, ssc_steplog;
2956 	u64 tmp;
2957 	bool use_ssc = false;
2958 	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
2959 	bool is_dkl = DISPLAY_VER(i915) >= 12;
2960 	int ret;
2961 
2962 	ret = icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
2963 				       pll_state, is_dkl);
2964 	if (ret)
2965 		return ret;
2966 
2967 	m1div = 2;
2968 	m2div_int = dco_khz / (refclk_khz * m1div);
2969 	if (m2div_int > 255) {
2970 		if (!is_dkl) {
2971 			m1div = 4;
2972 			m2div_int = dco_khz / (refclk_khz * m1div);
2973 		}
2974 
2975 		if (m2div_int > 255)
2976 			return -EINVAL;
2977 	}
2978 	m2div_rem = dco_khz % (refclk_khz * m1div);
2979 
2980 	tmp = (u64)m2div_rem * (1 << 22);
2981 	do_div(tmp, refclk_khz * m1div);
2982 	m2div_frac = tmp;
2983 
2984 	switch (refclk_khz) {
2985 	case 19200:
2986 		iref_ndiv = 1;
2987 		iref_trim = 28;
2988 		iref_pulse_w = 1;
2989 		break;
2990 	case 24000:
2991 		iref_ndiv = 1;
2992 		iref_trim = 25;
2993 		iref_pulse_w = 2;
2994 		break;
2995 	case 38400:
2996 		iref_ndiv = 2;
2997 		iref_trim = 28;
2998 		iref_pulse_w = 1;
2999 		break;
3000 	default:
3001 		MISSING_CASE(refclk_khz);
3002 		return -EINVAL;
3003 	}
3004 
3005 	/*
3006 	 * tdc_res = 0.000003
3007 	 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
3008 	 *
3009 	 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
3010 	 * was supposed to be a division, but we rearranged the operations of
3011 	 * the formula to avoid early divisions so we don't multiply the
3012 	 * rounding errors.
3013 	 *
3014 	 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
3015 	 * we also rearrange to work with integers.
3016 	 *
3017 	 * The 0.5 transformed to 5 results in a multiplication by 10 and the
3018 	 * last division by 10.
3019 	 */
3020 	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
3021 
3022 	/*
3023 	 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
3024 	 * 32 bits. That's not a problem since we round the division down
3025 	 * anyway.
3026 	 */
3027 	feedfwgain = (use_ssc || m2div_rem > 0) ?
3028 		m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
3029 
3030 	if (dco_khz >= 9000000) {
3031 		prop_coeff = 5;
3032 		int_coeff = 10;
3033 	} else {
3034 		prop_coeff = 4;
3035 		int_coeff = 8;
3036 	}
3037 
3038 	if (use_ssc) {
3039 		tmp = mul_u32_u32(dco_khz, 47 * 32);
3040 		do_div(tmp, refclk_khz * m1div * 10000);
3041 		ssc_stepsize = tmp;
3042 
3043 		tmp = mul_u32_u32(dco_khz, 1000);
3044 		ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
3045 	} else {
3046 		ssc_stepsize = 0;
3047 		ssc_steplen = 0;
3048 	}
3049 	ssc_steplog = 4;
3050 
3051 	/* write pll_state calculations */
3052 	if (is_dkl) {
3053 		pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
3054 					 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
3055 					 DKL_PLL_DIV0_FBPREDIV(m1div) |
3056 					 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
3057 		if (i915->display.vbt.override_afc_startup) {
3058 			u8 val = i915->display.vbt.override_afc_startup_val;
3059 
3060 			pll_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val);
3061 		}
3062 
3063 		pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
3064 					 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
3065 
3066 		pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
3067 					DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
3068 					DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
3069 					(use_ssc ? DKL_PLL_SSC_EN : 0);
3070 
3071 		pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
3072 					  DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
3073 
3074 		pll_state->mg_pll_tdc_coldst_bias =
3075 				DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
3076 				DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
3077 
3078 	} else {
3079 		pll_state->mg_pll_div0 =
3080 			(m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
3081 			MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
3082 			MG_PLL_DIV0_FBDIV_INT(m2div_int);
3083 
3084 		pll_state->mg_pll_div1 =
3085 			MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
3086 			MG_PLL_DIV1_DITHER_DIV_2 |
3087 			MG_PLL_DIV1_NDIVRATIO(1) |
3088 			MG_PLL_DIV1_FBPREDIV(m1div);
3089 
3090 		pll_state->mg_pll_lf =
3091 			MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
3092 			MG_PLL_LF_AFCCNTSEL_512 |
3093 			MG_PLL_LF_GAINCTRL(1) |
3094 			MG_PLL_LF_INT_COEFF(int_coeff) |
3095 			MG_PLL_LF_PROP_COEFF(prop_coeff);
3096 
3097 		pll_state->mg_pll_frac_lock =
3098 			MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
3099 			MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
3100 			MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
3101 			MG_PLL_FRAC_LOCK_DCODITHEREN |
3102 			MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
3103 		if (use_ssc || m2div_rem > 0)
3104 			pll_state->mg_pll_frac_lock |=
3105 				MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
3106 
3107 		pll_state->mg_pll_ssc =
3108 			(use_ssc ? MG_PLL_SSC_EN : 0) |
3109 			MG_PLL_SSC_TYPE(2) |
3110 			MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
3111 			MG_PLL_SSC_STEPNUM(ssc_steplog) |
3112 			MG_PLL_SSC_FLLEN |
3113 			MG_PLL_SSC_STEPSIZE(ssc_stepsize);
3114 
3115 		pll_state->mg_pll_tdc_coldst_bias =
3116 			MG_PLL_TDC_COLDST_COLDSTART |
3117 			MG_PLL_TDC_COLDST_IREFINT_EN |
3118 			MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
3119 			MG_PLL_TDC_TDCOVCCORR_EN |
3120 			MG_PLL_TDC_TDCSEL(3);
3121 
3122 		pll_state->mg_pll_bias =
3123 			MG_PLL_BIAS_BIAS_GB_SEL(3) |
3124 			MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
3125 			MG_PLL_BIAS_BIAS_BONUS(10) |
3126 			MG_PLL_BIAS_BIASCAL_EN |
3127 			MG_PLL_BIAS_CTRIM(12) |
3128 			MG_PLL_BIAS_VREF_RDAC(4) |
3129 			MG_PLL_BIAS_IREFTRIM(iref_trim);
3130 
3131 		if (refclk_khz == 38400) {
3132 			pll_state->mg_pll_tdc_coldst_bias_mask =
3133 				MG_PLL_TDC_COLDST_COLDSTART;
3134 			pll_state->mg_pll_bias_mask = 0;
3135 		} else {
3136 			pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
3137 			pll_state->mg_pll_bias_mask = -1U;
3138 		}
3139 
3140 		pll_state->mg_pll_tdc_coldst_bias &=
3141 			pll_state->mg_pll_tdc_coldst_bias_mask;
3142 		pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
3143 	}
3144 
3145 	return 0;
3146 }
3147 
3148 static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *i915,
3149 				   const struct intel_shared_dpll *pll,
3150 				   const struct intel_dpll_hw_state *pll_state)
3151 {
3152 	u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
3153 	u64 tmp;
3154 
3155 	ref_clock = i915->display.dpll.ref_clks.nssc;
3156 
3157 	if (DISPLAY_VER(i915) >= 12) {
3158 		m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
3159 		m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
3160 		m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
3161 
3162 		if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
3163 			m2_frac = pll_state->mg_pll_bias &
3164 				  DKL_PLL_BIAS_FBDIV_FRAC_MASK;
3165 			m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
3166 		} else {
3167 			m2_frac = 0;
3168 		}
3169 	} else {
3170 		m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
3171 		m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
3172 
3173 		if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
3174 			m2_frac = pll_state->mg_pll_div0 &
3175 				  MG_PLL_DIV0_FBDIV_FRAC_MASK;
3176 			m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
3177 		} else {
3178 			m2_frac = 0;
3179 		}
3180 	}
3181 
3182 	switch (pll_state->mg_clktop2_hsclkctl &
3183 		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
3184 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
3185 		div1 = 2;
3186 		break;
3187 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
3188 		div1 = 3;
3189 		break;
3190 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
3191 		div1 = 5;
3192 		break;
3193 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
3194 		div1 = 7;
3195 		break;
3196 	default:
3197 		MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
3198 		return 0;
3199 	}
3200 
3201 	div2 = (pll_state->mg_clktop2_hsclkctl &
3202 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
3203 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;
3204 
3205 	/* div2 value of 0 is same as 1 means no div */
3206 	if (div2 == 0)
3207 		div2 = 1;
3208 
3209 	/*
3210 	 * Adjust the original formula to delay the division by 2^22 in order to
3211 	 * minimize possible rounding errors.
3212 	 */
3213 	tmp = (u64)m1 * m2_int * ref_clock +
3214 	      (((u64)m1 * m2_frac * ref_clock) >> 22);
3215 	tmp = div_u64(tmp, 5 * div1 * div2);
3216 
3217 	return tmp;
3218 }
3219 
3220 /**
3221  * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
3222  * @crtc_state: state for the CRTC to select the DPLL for
3223  * @port_dpll_id: the active @port_dpll_id to select
3224  *
3225  * Select the given @port_dpll_id instance from the DPLLs reserved for the
3226  * CRTC.
3227  */
3228 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
3229 			      enum icl_port_dpll_id port_dpll_id)
3230 {
3231 	struct icl_port_dpll *port_dpll =
3232 		&crtc_state->icl_port_dplls[port_dpll_id];
3233 
3234 	crtc_state->shared_dpll = port_dpll->pll;
3235 	crtc_state->dpll_hw_state = port_dpll->hw_state;
3236 }
3237 
3238 static void icl_update_active_dpll(struct intel_atomic_state *state,
3239 				   struct intel_crtc *crtc,
3240 				   struct intel_encoder *encoder)
3241 {
3242 	struct intel_crtc_state *crtc_state =
3243 		intel_atomic_get_new_crtc_state(state, crtc);
3244 	struct intel_digital_port *primary_port;
3245 	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3246 
3247 	primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3248 		enc_to_mst(encoder)->primary :
3249 		enc_to_dig_port(encoder);
3250 
3251 	if (primary_port &&
3252 	    (intel_tc_port_in_dp_alt_mode(primary_port) ||
3253 	     intel_tc_port_in_legacy_mode(primary_port)))
3254 		port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3255 
3256 	icl_set_active_port_dpll(crtc_state, port_dpll_id);
3257 }
3258 
3259 static int icl_compute_combo_phy_dpll(struct intel_atomic_state *state,
3260 				      struct intel_crtc *crtc)
3261 {
3262 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
3263 	struct intel_crtc_state *crtc_state =
3264 		intel_atomic_get_new_crtc_state(state, crtc);
3265 	struct icl_port_dpll *port_dpll =
3266 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3267 	struct skl_wrpll_params pll_params = {};
3268 	int ret;
3269 
3270 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
3271 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
3272 		ret = icl_calc_wrpll(crtc_state, &pll_params);
3273 	else
3274 		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
3275 
3276 	if (ret)
3277 		return ret;
3278 
3279 	icl_calc_dpll_state(i915, &pll_params, &port_dpll->hw_state);
3280 
3281 	/* this is mainly for the fastset check */
3282 	icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
3283 
3284 	crtc_state->port_clock = icl_ddi_combo_pll_get_freq(i915, NULL,
3285 							    &port_dpll->hw_state);
3286 
3287 	return 0;
3288 }
3289 
3290 static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
3291 				  struct intel_crtc *crtc,
3292 				  struct intel_encoder *encoder)
3293 {
3294 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
3295 	struct intel_crtc_state *crtc_state =
3296 		intel_atomic_get_new_crtc_state(state, crtc);
3297 	struct icl_port_dpll *port_dpll =
3298 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3299 	enum port port = encoder->port;
3300 	unsigned long dpll_mask;
3301 
3302 	if (IS_ALDERLAKE_S(i915)) {
3303 		dpll_mask =
3304 			BIT(DPLL_ID_DG1_DPLL3) |
3305 			BIT(DPLL_ID_DG1_DPLL2) |
3306 			BIT(DPLL_ID_ICL_DPLL1) |
3307 			BIT(DPLL_ID_ICL_DPLL0);
3308 	} else if (IS_DG1(i915)) {
3309 		if (port == PORT_D || port == PORT_E) {
3310 			dpll_mask =
3311 				BIT(DPLL_ID_DG1_DPLL2) |
3312 				BIT(DPLL_ID_DG1_DPLL3);
3313 		} else {
3314 			dpll_mask =
3315 				BIT(DPLL_ID_DG1_DPLL0) |
3316 				BIT(DPLL_ID_DG1_DPLL1);
3317 		}
3318 	} else if (IS_ROCKETLAKE(i915)) {
3319 		dpll_mask =
3320 			BIT(DPLL_ID_EHL_DPLL4) |
3321 			BIT(DPLL_ID_ICL_DPLL1) |
3322 			BIT(DPLL_ID_ICL_DPLL0);
3323 	} else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
3324 		   port != PORT_A) {
3325 		dpll_mask =
3326 			BIT(DPLL_ID_EHL_DPLL4) |
3327 			BIT(DPLL_ID_ICL_DPLL1) |
3328 			BIT(DPLL_ID_ICL_DPLL0);
3329 	} else {
3330 		dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
3331 	}
3332 
3333 	/* Eliminate DPLLs from consideration if reserved by HTI */
3334 	dpll_mask &= ~intel_hti_dpll_mask(i915);
3335 
3336 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3337 						&port_dpll->hw_state,
3338 						dpll_mask);
3339 	if (!port_dpll->pll)
3340 		return -EINVAL;
3341 
3342 	intel_reference_shared_dpll(state, crtc,
3343 				    port_dpll->pll, &port_dpll->hw_state);
3344 
3345 	icl_update_active_dpll(state, crtc, encoder);
3346 
3347 	return 0;
3348 }
3349 
3350 static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
3351 				    struct intel_crtc *crtc)
3352 {
3353 	struct drm_i915_private *i915 = to_i915(state->base.dev);
3354 	struct intel_crtc_state *crtc_state =
3355 		intel_atomic_get_new_crtc_state(state, crtc);
3356 	const struct intel_crtc_state *old_crtc_state =
3357 		intel_atomic_get_old_crtc_state(state, crtc);
3358 	struct icl_port_dpll *port_dpll =
3359 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3360 	struct skl_wrpll_params pll_params = {};
3361 	int ret;
3362 
3363 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3364 	ret = icl_calc_tbt_pll(crtc_state, &pll_params);
3365 	if (ret)
3366 		return ret;
3367 
3368 	icl_calc_dpll_state(i915, &pll_params, &port_dpll->hw_state);
3369 
3370 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3371 	ret = icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state);
3372 	if (ret)
3373 		return ret;
3374 
3375 	/* this is mainly for the fastset check */
3376 	if (old_crtc_state->shared_dpll &&
3377 	    old_crtc_state->shared_dpll->info->id == DPLL_ID_ICL_TBTPLL)
3378 		icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
3379 	else
3380 		icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);
3381 
3382 	crtc_state->port_clock = icl_ddi_mg_pll_get_freq(i915, NULL,
3383 							 &port_dpll->hw_state);
3384 
3385 	return 0;
3386 }
3387 
3388 static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
3389 				struct intel_crtc *crtc,
3390 				struct intel_encoder *encoder)
3391 {
3392 	struct drm_i915_private *i915 = to_i915(state->base.dev);
3393 	struct intel_crtc_state *crtc_state =
3394 		intel_atomic_get_new_crtc_state(state, crtc);
3395 	struct icl_port_dpll *port_dpll =
3396 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3397 	enum intel_dpll_id dpll_id;
3398 	int ret;
3399 
3400 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3401 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3402 						&port_dpll->hw_state,
3403 						BIT(DPLL_ID_ICL_TBTPLL));
3404 	if (!port_dpll->pll)
3405 		return -EINVAL;
3406 	intel_reference_shared_dpll(state, crtc,
3407 				    port_dpll->pll, &port_dpll->hw_state);
3408 
3409 
3410 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3411 	dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(i915,
3412 							 encoder->port));
3413 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3414 						&port_dpll->hw_state,
3415 						BIT(dpll_id));
3416 	if (!port_dpll->pll) {
3417 		ret = -EINVAL;
3418 		goto err_unreference_tbt_pll;
3419 	}
3420 	intel_reference_shared_dpll(state, crtc,
3421 				    port_dpll->pll, &port_dpll->hw_state);
3422 
3423 	icl_update_active_dpll(state, crtc, encoder);
3424 
3425 	return 0;
3426 
3427 err_unreference_tbt_pll:
3428 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3429 	intel_unreference_shared_dpll(state, crtc, port_dpll->pll);
3430 
3431 	return ret;
3432 }
3433 
3434 static int icl_compute_dplls(struct intel_atomic_state *state,
3435 			     struct intel_crtc *crtc,
3436 			     struct intel_encoder *encoder)
3437 {
3438 	struct drm_i915_private *i915 = to_i915(state->base.dev);
3439 	enum phy phy = intel_port_to_phy(i915, encoder->port);
3440 
3441 	if (intel_phy_is_combo(i915, phy))
3442 		return icl_compute_combo_phy_dpll(state, crtc);
3443 	else if (intel_phy_is_tc(i915, phy))
3444 		return icl_compute_tc_phy_dplls(state, crtc);
3445 
3446 	MISSING_CASE(phy);
3447 
3448 	return 0;
3449 }
3450 
3451 static int icl_get_dplls(struct intel_atomic_state *state,
3452 			 struct intel_crtc *crtc,
3453 			 struct intel_encoder *encoder)
3454 {
3455 	struct drm_i915_private *i915 = to_i915(state->base.dev);
3456 	enum phy phy = intel_port_to_phy(i915, encoder->port);
3457 
3458 	if (intel_phy_is_combo(i915, phy))
3459 		return icl_get_combo_phy_dpll(state, crtc, encoder);
3460 	else if (intel_phy_is_tc(i915, phy))
3461 		return icl_get_tc_phy_dplls(state, crtc, encoder);
3462 
3463 	MISSING_CASE(phy);
3464 
3465 	return -EINVAL;
3466 }
3467 
3468 static void icl_put_dplls(struct intel_atomic_state *state,
3469 			  struct intel_crtc *crtc)
3470 {
3471 	const struct intel_crtc_state *old_crtc_state =
3472 		intel_atomic_get_old_crtc_state(state, crtc);
3473 	struct intel_crtc_state *new_crtc_state =
3474 		intel_atomic_get_new_crtc_state(state, crtc);
3475 	enum icl_port_dpll_id id;
3476 
3477 	new_crtc_state->shared_dpll = NULL;
3478 
3479 	for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3480 		const struct icl_port_dpll *old_port_dpll =
3481 			&old_crtc_state->icl_port_dplls[id];
3482 		struct icl_port_dpll *new_port_dpll =
3483 			&new_crtc_state->icl_port_dplls[id];
3484 
3485 		new_port_dpll->pll = NULL;
3486 
3487 		if (!old_port_dpll->pll)
3488 			continue;
3489 
3490 		intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3491 	}
3492 }
3493 
3494 static bool mg_pll_get_hw_state(struct drm_i915_private *i915,
3495 				struct intel_shared_dpll *pll,
3496 				struct intel_dpll_hw_state *hw_state)
3497 {
3498 	const enum intel_dpll_id id = pll->info->id;
3499 	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3500 	intel_wakeref_t wakeref;
3501 	bool ret = false;
3502 	u32 val;
3503 
3504 	i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll);
3505 
3506 	wakeref = intel_display_power_get_if_enabled(i915,
3507 						     POWER_DOMAIN_DISPLAY_CORE);
3508 	if (!wakeref)
3509 		return false;
3510 
3511 	val = intel_de_read(i915, enable_reg);
3512 	if (!(val & PLL_ENABLE))
3513 		goto out;
3514 
3515 	hw_state->mg_refclkin_ctl = intel_de_read(i915,
3516 						  MG_REFCLKIN_CTL(tc_port));
3517 	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3518 
3519 	hw_state->mg_clktop2_coreclkctl1 =
3520 		intel_de_read(i915, MG_CLKTOP2_CORECLKCTL1(tc_port));
3521 	hw_state->mg_clktop2_coreclkctl1 &=
3522 		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3523 
3524 	hw_state->mg_clktop2_hsclkctl =
3525 		intel_de_read(i915, MG_CLKTOP2_HSCLKCTL(tc_port));
3526 	hw_state->mg_clktop2_hsclkctl &=
3527 		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3528 		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3529 		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3530 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3531 
3532 	hw_state->mg_pll_div0 = intel_de_read(i915, MG_PLL_DIV0(tc_port));
3533 	hw_state->mg_pll_div1 = intel_de_read(i915, MG_PLL_DIV1(tc_port));
3534 	hw_state->mg_pll_lf = intel_de_read(i915, MG_PLL_LF(tc_port));
3535 	hw_state->mg_pll_frac_lock = intel_de_read(i915,
3536 						   MG_PLL_FRAC_LOCK(tc_port));
3537 	hw_state->mg_pll_ssc = intel_de_read(i915, MG_PLL_SSC(tc_port));
3538 
3539 	hw_state->mg_pll_bias = intel_de_read(i915, MG_PLL_BIAS(tc_port));
3540 	hw_state->mg_pll_tdc_coldst_bias =
3541 		intel_de_read(i915, MG_PLL_TDC_COLDST_BIAS(tc_port));
3542 
3543 	if (i915->display.dpll.ref_clks.nssc == 38400) {
3544 		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
3545 		hw_state->mg_pll_bias_mask = 0;
3546 	} else {
3547 		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
3548 		hw_state->mg_pll_bias_mask = -1U;
3549 	}
3550 
3551 	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
3552 	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
3553 
3554 	ret = true;
3555 out:
3556 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3557 	return ret;
3558 }
3559 
3560 static bool dkl_pll_get_hw_state(struct drm_i915_private *i915,
3561 				 struct intel_shared_dpll *pll,
3562 				 struct intel_dpll_hw_state *hw_state)
3563 {
3564 	const enum intel_dpll_id id = pll->info->id;
3565 	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3566 	intel_wakeref_t wakeref;
3567 	bool ret = false;
3568 	u32 val;
3569 
3570 	wakeref = intel_display_power_get_if_enabled(i915,
3571 						     POWER_DOMAIN_DISPLAY_CORE);
3572 	if (!wakeref)
3573 		return false;
3574 
3575 	val = intel_de_read(i915, intel_tc_pll_enable_reg(i915, pll));
3576 	if (!(val & PLL_ENABLE))
3577 		goto out;
3578 
3579 	/*
3580 	 * All registers read here have the same HIP_INDEX_REG even though
3581 	 * they are on different building blocks
3582 	 */
3583 	hw_state->mg_refclkin_ctl = intel_dkl_phy_read(i915,
3584 						       DKL_REFCLKIN_CTL(tc_port));
3585 	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3586 
3587 	hw_state->mg_clktop2_hsclkctl =
3588 		intel_dkl_phy_read(i915, DKL_CLKTOP2_HSCLKCTL(tc_port));
3589 	hw_state->mg_clktop2_hsclkctl &=
3590 		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3591 		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3592 		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3593 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3594 
3595 	hw_state->mg_clktop2_coreclkctl1 =
3596 		intel_dkl_phy_read(i915, DKL_CLKTOP2_CORECLKCTL1(tc_port));
3597 	hw_state->mg_clktop2_coreclkctl1 &=
3598 		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3599 
3600 	hw_state->mg_pll_div0 = intel_dkl_phy_read(i915, DKL_PLL_DIV0(tc_port));
3601 	val = DKL_PLL_DIV0_MASK;
3602 	if (i915->display.vbt.override_afc_startup)
3603 		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
3604 	hw_state->mg_pll_div0 &= val;
3605 
3606 	hw_state->mg_pll_div1 = intel_dkl_phy_read(i915, DKL_PLL_DIV1(tc_port));
3607 	hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
3608 				  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
3609 
3610 	hw_state->mg_pll_ssc = intel_dkl_phy_read(i915, DKL_PLL_SSC(tc_port));
3611 	hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
3612 				 DKL_PLL_SSC_STEP_LEN_MASK |
3613 				 DKL_PLL_SSC_STEP_NUM_MASK |
3614 				 DKL_PLL_SSC_EN);
3615 
3616 	hw_state->mg_pll_bias = intel_dkl_phy_read(i915, DKL_PLL_BIAS(tc_port));
3617 	hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
3618 				  DKL_PLL_BIAS_FBDIV_FRAC_MASK);
3619 
3620 	hw_state->mg_pll_tdc_coldst_bias =
3621 		intel_dkl_phy_read(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3622 	hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
3623 					     DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
3624 
3625 	ret = true;
3626 out:
3627 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3628 	return ret;
3629 }
3630 
3631 static bool icl_pll_get_hw_state(struct drm_i915_private *i915,
3632 				 struct intel_shared_dpll *pll,
3633 				 struct intel_dpll_hw_state *hw_state,
3634 				 i915_reg_t enable_reg)
3635 {
3636 	const enum intel_dpll_id id = pll->info->id;
3637 	intel_wakeref_t wakeref;
3638 	bool ret = false;
3639 	u32 val;
3640 
3641 	wakeref = intel_display_power_get_if_enabled(i915,
3642 						     POWER_DOMAIN_DISPLAY_CORE);
3643 	if (!wakeref)
3644 		return false;
3645 
3646 	val = intel_de_read(i915, enable_reg);
3647 	if (!(val & PLL_ENABLE))
3648 		goto out;
3649 
3650 	if (IS_ALDERLAKE_S(i915)) {
3651 		hw_state->cfgcr0 = intel_de_read(i915, ADLS_DPLL_CFGCR0(id));
3652 		hw_state->cfgcr1 = intel_de_read(i915, ADLS_DPLL_CFGCR1(id));
3653 	} else if (IS_DG1(i915)) {
3654 		hw_state->cfgcr0 = intel_de_read(i915, DG1_DPLL_CFGCR0(id));
3655 		hw_state->cfgcr1 = intel_de_read(i915, DG1_DPLL_CFGCR1(id));
3656 	} else if (IS_ROCKETLAKE(i915)) {
3657 		hw_state->cfgcr0 = intel_de_read(i915,
3658 						 RKL_DPLL_CFGCR0(id));
3659 		hw_state->cfgcr1 = intel_de_read(i915,
3660 						 RKL_DPLL_CFGCR1(id));
3661 	} else if (DISPLAY_VER(i915) >= 12) {
3662 		hw_state->cfgcr0 = intel_de_read(i915,
3663 						 TGL_DPLL_CFGCR0(id));
3664 		hw_state->cfgcr1 = intel_de_read(i915,
3665 						 TGL_DPLL_CFGCR1(id));
3666 		if (i915->display.vbt.override_afc_startup) {
3667 			hw_state->div0 = intel_de_read(i915, TGL_DPLL0_DIV0(id));
3668 			hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK;
3669 		}
3670 	} else {
3671 		if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
3672 		    id == DPLL_ID_EHL_DPLL4) {
3673 			hw_state->cfgcr0 = intel_de_read(i915,
3674 							 ICL_DPLL_CFGCR0(4));
3675 			hw_state->cfgcr1 = intel_de_read(i915,
3676 							 ICL_DPLL_CFGCR1(4));
3677 		} else {
3678 			hw_state->cfgcr0 = intel_de_read(i915,
3679 							 ICL_DPLL_CFGCR0(id));
3680 			hw_state->cfgcr1 = intel_de_read(i915,
3681 							 ICL_DPLL_CFGCR1(id));
3682 		}
3683 	}
3684 
3685 	ret = true;
3686 out:
3687 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3688 	return ret;
3689 }
3690 
3691 static bool combo_pll_get_hw_state(struct drm_i915_private *i915,
3692 				   struct intel_shared_dpll *pll,
3693 				   struct intel_dpll_hw_state *hw_state)
3694 {
3695 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
3696 
3697 	return icl_pll_get_hw_state(i915, pll, hw_state, enable_reg);
3698 }
3699 
3700 static bool tbt_pll_get_hw_state(struct drm_i915_private *i915,
3701 				 struct intel_shared_dpll *pll,
3702 				 struct intel_dpll_hw_state *hw_state)
3703 {
3704 	return icl_pll_get_hw_state(i915, pll, hw_state, TBT_PLL_ENABLE);
3705 }
3706 
3707 static void icl_dpll_write(struct drm_i915_private *i915,
3708 			   struct intel_shared_dpll *pll)
3709 {
3710 	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3711 	const enum intel_dpll_id id = pll->info->id;
3712 	i915_reg_t cfgcr0_reg, cfgcr1_reg, div0_reg = INVALID_MMIO_REG;
3713 
3714 	if (IS_ALDERLAKE_S(i915)) {
3715 		cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
3716 		cfgcr1_reg = ADLS_DPLL_CFGCR1(id);
3717 	} else if (IS_DG1(i915)) {
3718 		cfgcr0_reg = DG1_DPLL_CFGCR0(id);
3719 		cfgcr1_reg = DG1_DPLL_CFGCR1(id);
3720 	} else if (IS_ROCKETLAKE(i915)) {
3721 		cfgcr0_reg = RKL_DPLL_CFGCR0(id);
3722 		cfgcr1_reg = RKL_DPLL_CFGCR1(id);
3723 	} else if (DISPLAY_VER(i915) >= 12) {
3724 		cfgcr0_reg = TGL_DPLL_CFGCR0(id);
3725 		cfgcr1_reg = TGL_DPLL_CFGCR1(id);
3726 		div0_reg = TGL_DPLL0_DIV0(id);
3727 	} else {
3728 		if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
3729 		    id == DPLL_ID_EHL_DPLL4) {
3730 			cfgcr0_reg = ICL_DPLL_CFGCR0(4);
3731 			cfgcr1_reg = ICL_DPLL_CFGCR1(4);
3732 		} else {
3733 			cfgcr0_reg = ICL_DPLL_CFGCR0(id);
3734 			cfgcr1_reg = ICL_DPLL_CFGCR1(id);
3735 		}
3736 	}
3737 
3738 	intel_de_write(i915, cfgcr0_reg, hw_state->cfgcr0);
3739 	intel_de_write(i915, cfgcr1_reg, hw_state->cfgcr1);
3740 	drm_WARN_ON_ONCE(&i915->drm, i915->display.vbt.override_afc_startup &&
3741 			 !i915_mmio_reg_valid(div0_reg));
3742 	if (i915->display.vbt.override_afc_startup &&
3743 	    i915_mmio_reg_valid(div0_reg))
3744 		intel_de_rmw(i915, div0_reg,
3745 			     TGL_DPLL0_DIV0_AFC_STARTUP_MASK, hw_state->div0);
3746 	intel_de_posting_read(i915, cfgcr1_reg);
3747 }
3748 
3749 static void icl_mg_pll_write(struct drm_i915_private *i915,
3750 			     struct intel_shared_dpll *pll)
3751 {
3752 	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3753 	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3754 
3755 	/*
3756 	 * Some of the following registers have reserved fields, so program
3757 	 * these with RMW based on a mask. The mask can be fixed or generated
3758 	 * during the calc/readout phase if the mask depends on some other HW
3759 	 * state like refclk, see icl_calc_mg_pll_state().
3760 	 */
3761 	intel_de_rmw(i915, MG_REFCLKIN_CTL(tc_port),
3762 		     MG_REFCLKIN_CTL_OD_2_MUX_MASK, hw_state->mg_refclkin_ctl);
3763 
3764 	intel_de_rmw(i915, MG_CLKTOP2_CORECLKCTL1(tc_port),
3765 		     MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK,
3766 		     hw_state->mg_clktop2_coreclkctl1);
3767 
3768 	intel_de_rmw(i915, MG_CLKTOP2_HSCLKCTL(tc_port),
3769 		     MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3770 		     MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3771 		     MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3772 		     MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK,
3773 		     hw_state->mg_clktop2_hsclkctl);
3774 
3775 	intel_de_write(i915, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
3776 	intel_de_write(i915, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
3777 	intel_de_write(i915, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
3778 	intel_de_write(i915, MG_PLL_FRAC_LOCK(tc_port),
3779 		       hw_state->mg_pll_frac_lock);
3780 	intel_de_write(i915, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
3781 
3782 	intel_de_rmw(i915, MG_PLL_BIAS(tc_port),
3783 		     hw_state->mg_pll_bias_mask, hw_state->mg_pll_bias);
3784 
3785 	intel_de_rmw(i915, MG_PLL_TDC_COLDST_BIAS(tc_port),
3786 		     hw_state->mg_pll_tdc_coldst_bias_mask,
3787 		     hw_state->mg_pll_tdc_coldst_bias);
3788 
3789 	intel_de_posting_read(i915, MG_PLL_TDC_COLDST_BIAS(tc_port));
3790 }
3791 
3792 static void dkl_pll_write(struct drm_i915_private *i915,
3793 			  struct intel_shared_dpll *pll)
3794 {
3795 	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3796 	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3797 	u32 val;
3798 
3799 	/*
3800 	 * All registers programmed here have the same HIP_INDEX_REG even
3801 	 * though on different building block
3802 	 */
3803 	/* All the registers are RMW */
3804 	val = intel_dkl_phy_read(i915, DKL_REFCLKIN_CTL(tc_port));
3805 	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3806 	val |= hw_state->mg_refclkin_ctl;
3807 	intel_dkl_phy_write(i915, DKL_REFCLKIN_CTL(tc_port), val);
3808 
3809 	val = intel_dkl_phy_read(i915, DKL_CLKTOP2_CORECLKCTL1(tc_port));
3810 	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3811 	val |= hw_state->mg_clktop2_coreclkctl1;
3812 	intel_dkl_phy_write(i915, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);
3813 
3814 	val = intel_dkl_phy_read(i915, DKL_CLKTOP2_HSCLKCTL(tc_port));
3815 	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3816 		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3817 		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3818 		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
3819 	val |= hw_state->mg_clktop2_hsclkctl;
3820 	intel_dkl_phy_write(i915, DKL_CLKTOP2_HSCLKCTL(tc_port), val);
3821 
3822 	val = DKL_PLL_DIV0_MASK;
3823 	if (i915->display.vbt.override_afc_startup)
3824 		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
3825 	intel_dkl_phy_rmw(i915, DKL_PLL_DIV0(tc_port), val,
3826 			  hw_state->mg_pll_div0);
3827 
3828 	val = intel_dkl_phy_read(i915, DKL_PLL_DIV1(tc_port));
3829 	val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
3830 		 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
3831 	val |= hw_state->mg_pll_div1;
3832 	intel_dkl_phy_write(i915, DKL_PLL_DIV1(tc_port), val);
3833 
3834 	val = intel_dkl_phy_read(i915, DKL_PLL_SSC(tc_port));
3835 	val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
3836 		 DKL_PLL_SSC_STEP_LEN_MASK |
3837 		 DKL_PLL_SSC_STEP_NUM_MASK |
3838 		 DKL_PLL_SSC_EN);
3839 	val |= hw_state->mg_pll_ssc;
3840 	intel_dkl_phy_write(i915, DKL_PLL_SSC(tc_port), val);
3841 
3842 	val = intel_dkl_phy_read(i915, DKL_PLL_BIAS(tc_port));
3843 	val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
3844 		 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
3845 	val |= hw_state->mg_pll_bias;
3846 	intel_dkl_phy_write(i915, DKL_PLL_BIAS(tc_port), val);
3847 
3848 	val = intel_dkl_phy_read(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3849 	val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
3850 		 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
3851 	val |= hw_state->mg_pll_tdc_coldst_bias;
3852 	intel_dkl_phy_write(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);
3853 
3854 	intel_dkl_phy_posting_read(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3855 }
3856 
3857 static void icl_pll_power_enable(struct drm_i915_private *i915,
3858 				 struct intel_shared_dpll *pll,
3859 				 i915_reg_t enable_reg)
3860 {
3861 	intel_de_rmw(i915, enable_reg, 0, PLL_POWER_ENABLE);
3862 
3863 	/*
3864 	 * The spec says we need to "wait" but it also says it should be
3865 	 * immediate.
3866 	 */
3867 	if (intel_de_wait_for_set(i915, enable_reg, PLL_POWER_STATE, 1))
3868 		drm_err(&i915->drm, "PLL %d Power not enabled\n",
3869 			pll->info->id);
3870 }
3871 
3872 static void icl_pll_enable(struct drm_i915_private *i915,
3873 			   struct intel_shared_dpll *pll,
3874 			   i915_reg_t enable_reg)
3875 {
3876 	intel_de_rmw(i915, enable_reg, 0, PLL_ENABLE);
3877 
3878 	/* Timeout is actually 600us. */
3879 	if (intel_de_wait_for_set(i915, enable_reg, PLL_LOCK, 1))
3880 		drm_err(&i915->drm, "PLL %d not locked\n", pll->info->id);
3881 }
3882 
3883 static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct intel_shared_dpll *pll)
3884 {
3885 	u32 val;
3886 
3887 	if (!(IS_ALDERLAKE_P(i915) && IS_DISPLAY_STEP(i915, STEP_A0, STEP_B0)) ||
3888 	    pll->info->id != DPLL_ID_ICL_DPLL0)
3889 		return;
3890 	/*
3891 	 * Wa_16011069516:adl-p[a0]
3892 	 *
3893 	 * All CMTG regs are unreliable until CMTG clock gating is disabled,
3894 	 * so we can only assume the default TRANS_CMTG_CHICKEN reg value and
3895 	 * sanity check this assumption with a double read, which presumably
3896 	 * returns the correct value even with clock gating on.
3897 	 *
3898 	 * Instead of the usual place for workarounds we apply this one here,
3899 	 * since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled.
3900 	 */
3901 	val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
3902 	val = intel_de_rmw(i915, TRANS_CMTG_CHICKEN, ~0, DISABLE_DPT_CLK_GATING);
3903 	if (drm_WARN_ON(&i915->drm, val & ~DISABLE_DPT_CLK_GATING))
3904 		drm_dbg_kms(&i915->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val);
3905 }
3906 
3907 static void combo_pll_enable(struct drm_i915_private *i915,
3908 			     struct intel_shared_dpll *pll)
3909 {
3910 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
3911 
3912 	icl_pll_power_enable(i915, pll, enable_reg);
3913 
3914 	icl_dpll_write(i915, pll);
3915 
3916 	/*
3917 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3918 	 * paths should already be setting the appropriate voltage, hence we do
3919 	 * nothing here.
3920 	 */
3921 
3922 	icl_pll_enable(i915, pll, enable_reg);
3923 
3924 	adlp_cmtg_clock_gating_wa(i915, pll);
3925 
3926 	/* DVFS post sequence would be here. See the comment above. */
3927 }
3928 
3929 static void tbt_pll_enable(struct drm_i915_private *i915,
3930 			   struct intel_shared_dpll *pll)
3931 {
3932 	icl_pll_power_enable(i915, pll, TBT_PLL_ENABLE);
3933 
3934 	icl_dpll_write(i915, pll);
3935 
3936 	/*
3937 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3938 	 * paths should already be setting the appropriate voltage, hence we do
3939 	 * nothing here.
3940 	 */
3941 
3942 	icl_pll_enable(i915, pll, TBT_PLL_ENABLE);
3943 
3944 	/* DVFS post sequence would be here. See the comment above. */
3945 }
3946 
3947 static void mg_pll_enable(struct drm_i915_private *i915,
3948 			  struct intel_shared_dpll *pll)
3949 {
3950 	i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll);
3951 
3952 	icl_pll_power_enable(i915, pll, enable_reg);
3953 
3954 	if (DISPLAY_VER(i915) >= 12)
3955 		dkl_pll_write(i915, pll);
3956 	else
3957 		icl_mg_pll_write(i915, pll);
3958 
3959 	/*
3960 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3961 	 * paths should already be setting the appropriate voltage, hence we do
3962 	 * nothing here.
3963 	 */
3964 
3965 	icl_pll_enable(i915, pll, enable_reg);
3966 
3967 	/* DVFS post sequence would be here. See the comment above. */
3968 }
3969 
3970 static void icl_pll_disable(struct drm_i915_private *i915,
3971 			    struct intel_shared_dpll *pll,
3972 			    i915_reg_t enable_reg)
3973 {
3974 	/* The first steps are done by intel_ddi_post_disable(). */
3975 
3976 	/*
3977 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3978 	 * paths should already be setting the appropriate voltage, hence we do
3979 	 * nothing here.
3980 	 */
3981 
3982 	intel_de_rmw(i915, enable_reg, PLL_ENABLE, 0);
3983 
3984 	/* Timeout is actually 1us. */
3985 	if (intel_de_wait_for_clear(i915, enable_reg, PLL_LOCK, 1))
3986 		drm_err(&i915->drm, "PLL %d locked\n", pll->info->id);
3987 
3988 	/* DVFS post sequence would be here. See the comment above. */
3989 
3990 	intel_de_rmw(i915, enable_reg, PLL_POWER_ENABLE, 0);
3991 
3992 	/*
3993 	 * The spec says we need to "wait" but it also says it should be
3994 	 * immediate.
3995 	 */
3996 	if (intel_de_wait_for_clear(i915, enable_reg, PLL_POWER_STATE, 1))
3997 		drm_err(&i915->drm, "PLL %d Power not disabled\n",
3998 			pll->info->id);
3999 }
4000 
4001 static void combo_pll_disable(struct drm_i915_private *i915,
4002 			      struct intel_shared_dpll *pll)
4003 {
4004 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
4005 
4006 	icl_pll_disable(i915, pll, enable_reg);
4007 }
4008 
4009 static void tbt_pll_disable(struct drm_i915_private *i915,
4010 			    struct intel_shared_dpll *pll)
4011 {
4012 	icl_pll_disable(i915, pll, TBT_PLL_ENABLE);
4013 }
4014 
4015 static void mg_pll_disable(struct drm_i915_private *i915,
4016 			   struct intel_shared_dpll *pll)
4017 {
4018 	i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll);
4019 
4020 	icl_pll_disable(i915, pll, enable_reg);
4021 }
4022 
4023 static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
4024 {
4025 	/* No SSC ref */
4026 	i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
4027 }
4028 
4029 static void icl_dump_hw_state(struct drm_i915_private *i915,
4030 			      const struct intel_dpll_hw_state *hw_state)
4031 {
4032 	drm_dbg_kms(&i915->drm,
4033 		    "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, "
4034 		    "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
4035 		    "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
4036 		    "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
4037 		    "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
4038 		    "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
4039 		    hw_state->cfgcr0, hw_state->cfgcr1,
4040 		    hw_state->div0,
4041 		    hw_state->mg_refclkin_ctl,
4042 		    hw_state->mg_clktop2_coreclkctl1,
4043 		    hw_state->mg_clktop2_hsclkctl,
4044 		    hw_state->mg_pll_div0,
4045 		    hw_state->mg_pll_div1,
4046 		    hw_state->mg_pll_lf,
4047 		    hw_state->mg_pll_frac_lock,
4048 		    hw_state->mg_pll_ssc,
4049 		    hw_state->mg_pll_bias,
4050 		    hw_state->mg_pll_tdc_coldst_bias);
4051 }
4052 
4053 static bool icl_compare_hw_state(const struct intel_dpll_hw_state *a,
4054 				 const struct intel_dpll_hw_state *b)
4055 {
4056 	/* FIXME split combo vs. mg more thoroughly */
4057 	return a->cfgcr0 == b->cfgcr0 &&
4058 		a->cfgcr1 == b->cfgcr1 &&
4059 		a->div0 == b->div0 &&
4060 		a->mg_refclkin_ctl == b->mg_refclkin_ctl &&
4061 		a->mg_clktop2_coreclkctl1 == b->mg_clktop2_coreclkctl1 &&
4062 		a->mg_clktop2_hsclkctl == b->mg_clktop2_hsclkctl &&
4063 		a->mg_pll_div0 == b->mg_pll_div0 &&
4064 		a->mg_pll_div1 == b->mg_pll_div1 &&
4065 		a->mg_pll_lf == b->mg_pll_lf &&
4066 		a->mg_pll_frac_lock == b->mg_pll_frac_lock &&
4067 		a->mg_pll_ssc == b->mg_pll_ssc &&
4068 		a->mg_pll_bias == b->mg_pll_bias &&
4069 		a->mg_pll_tdc_coldst_bias == b->mg_pll_tdc_coldst_bias;
4070 }
4071 
4072 static const struct intel_shared_dpll_funcs combo_pll_funcs = {
4073 	.enable = combo_pll_enable,
4074 	.disable = combo_pll_disable,
4075 	.get_hw_state = combo_pll_get_hw_state,
4076 	.get_freq = icl_ddi_combo_pll_get_freq,
4077 };
4078 
4079 static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
4080 	.enable = tbt_pll_enable,
4081 	.disable = tbt_pll_disable,
4082 	.get_hw_state = tbt_pll_get_hw_state,
4083 	.get_freq = icl_ddi_tbt_pll_get_freq,
4084 };
4085 
4086 static const struct intel_shared_dpll_funcs mg_pll_funcs = {
4087 	.enable = mg_pll_enable,
4088 	.disable = mg_pll_disable,
4089 	.get_hw_state = mg_pll_get_hw_state,
4090 	.get_freq = icl_ddi_mg_pll_get_freq,
4091 };
4092 
4093 static const struct dpll_info icl_plls[] = {
4094 	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4095 	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4096 	{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
4097 	  .is_alt_port_dpll = true, },
4098 	{ .name = "MG PLL 1", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
4099 	{ .name = "MG PLL 2", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
4100 	{ .name = "MG PLL 3", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
4101 	{ .name = "MG PLL 4", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
4102 	{}
4103 };
4104 
4105 static const struct intel_dpll_mgr icl_pll_mgr = {
4106 	.dpll_info = icl_plls,
4107 	.compute_dplls = icl_compute_dplls,
4108 	.get_dplls = icl_get_dplls,
4109 	.put_dplls = icl_put_dplls,
4110 	.update_active_dpll = icl_update_active_dpll,
4111 	.update_ref_clks = icl_update_dpll_ref_clks,
4112 	.dump_hw_state = icl_dump_hw_state,
4113 	.compare_hw_state = icl_compare_hw_state,
4114 };
4115 
4116 static const struct dpll_info ehl_plls[] = {
4117 	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4118 	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4119 	{ .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4,
4120 	  .power_domain = POWER_DOMAIN_DC_OFF, },
4121 	{}
4122 };
4123 
4124 static const struct intel_dpll_mgr ehl_pll_mgr = {
4125 	.dpll_info = ehl_plls,
4126 	.compute_dplls = icl_compute_dplls,
4127 	.get_dplls = icl_get_dplls,
4128 	.put_dplls = icl_put_dplls,
4129 	.update_ref_clks = icl_update_dpll_ref_clks,
4130 	.dump_hw_state = icl_dump_hw_state,
4131 	.compare_hw_state = icl_compare_hw_state,
4132 };
4133 
4134 static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
4135 	.enable = mg_pll_enable,
4136 	.disable = mg_pll_disable,
4137 	.get_hw_state = dkl_pll_get_hw_state,
4138 	.get_freq = icl_ddi_mg_pll_get_freq,
4139 };
4140 
4141 static const struct dpll_info tgl_plls[] = {
4142 	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4143 	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4144 	{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
4145 	  .is_alt_port_dpll = true, },
4146 	{ .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
4147 	{ .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
4148 	{ .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
4149 	{ .name = "TC PLL 4", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
4150 	{ .name = "TC PLL 5", .funcs = &dkl_pll_funcs, .id = DPLL_ID_TGL_MGPLL5, },
4151 	{ .name = "TC PLL 6", .funcs = &dkl_pll_funcs, .id = DPLL_ID_TGL_MGPLL6, },
4152 	{}
4153 };
4154 
4155 static const struct intel_dpll_mgr tgl_pll_mgr = {
4156 	.dpll_info = tgl_plls,
4157 	.compute_dplls = icl_compute_dplls,
4158 	.get_dplls = icl_get_dplls,
4159 	.put_dplls = icl_put_dplls,
4160 	.update_active_dpll = icl_update_active_dpll,
4161 	.update_ref_clks = icl_update_dpll_ref_clks,
4162 	.dump_hw_state = icl_dump_hw_state,
4163 	.compare_hw_state = icl_compare_hw_state,
4164 };
4165 
4166 static const struct dpll_info rkl_plls[] = {
4167 	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4168 	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4169 	{ .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4, },
4170 	{}
4171 };
4172 
4173 static const struct intel_dpll_mgr rkl_pll_mgr = {
4174 	.dpll_info = rkl_plls,
4175 	.compute_dplls = icl_compute_dplls,
4176 	.get_dplls = icl_get_dplls,
4177 	.put_dplls = icl_put_dplls,
4178 	.update_ref_clks = icl_update_dpll_ref_clks,
4179 	.dump_hw_state = icl_dump_hw_state,
4180 	.compare_hw_state = icl_compare_hw_state,
4181 };
4182 
4183 static const struct dpll_info dg1_plls[] = {
4184 	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL0, },
4185 	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL1, },
4186 	{ .name = "DPLL 2", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL2, },
4187 	{ .name = "DPLL 3", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL3, },
4188 	{}
4189 };
4190 
4191 static const struct intel_dpll_mgr dg1_pll_mgr = {
4192 	.dpll_info = dg1_plls,
4193 	.compute_dplls = icl_compute_dplls,
4194 	.get_dplls = icl_get_dplls,
4195 	.put_dplls = icl_put_dplls,
4196 	.update_ref_clks = icl_update_dpll_ref_clks,
4197 	.dump_hw_state = icl_dump_hw_state,
4198 	.compare_hw_state = icl_compare_hw_state,
4199 };
4200 
4201 static const struct dpll_info adls_plls[] = {
4202 	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4203 	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4204 	{ .name = "DPLL 2", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL2, },
4205 	{ .name = "DPLL 3", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL3, },
4206 	{}
4207 };
4208 
4209 static const struct intel_dpll_mgr adls_pll_mgr = {
4210 	.dpll_info = adls_plls,
4211 	.compute_dplls = icl_compute_dplls,
4212 	.get_dplls = icl_get_dplls,
4213 	.put_dplls = icl_put_dplls,
4214 	.update_ref_clks = icl_update_dpll_ref_clks,
4215 	.dump_hw_state = icl_dump_hw_state,
4216 	.compare_hw_state = icl_compare_hw_state,
4217 };
4218 
4219 static const struct dpll_info adlp_plls[] = {
4220 	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4221 	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4222 	{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
4223 	  .is_alt_port_dpll = true, },
4224 	{ .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
4225 	{ .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
4226 	{ .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
4227 	{ .name = "TC PLL 4", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
4228 	{}
4229 };
4230 
4231 static const struct intel_dpll_mgr adlp_pll_mgr = {
4232 	.dpll_info = adlp_plls,
4233 	.compute_dplls = icl_compute_dplls,
4234 	.get_dplls = icl_get_dplls,
4235 	.put_dplls = icl_put_dplls,
4236 	.update_active_dpll = icl_update_active_dpll,
4237 	.update_ref_clks = icl_update_dpll_ref_clks,
4238 	.dump_hw_state = icl_dump_hw_state,
4239 	.compare_hw_state = icl_compare_hw_state,
4240 };
4241 
4242 /**
4243  * intel_shared_dpll_init - Initialize shared DPLLs
4244  * @i915: i915 device
4245  *
4246  * Initialize shared DPLLs for @i915.
4247  */
4248 void intel_shared_dpll_init(struct drm_i915_private *i915)
4249 {
4250 	const struct intel_dpll_mgr *dpll_mgr = NULL;
4251 	const struct dpll_info *dpll_info;
4252 	int i;
4253 
4254 	mutex_init(&i915->display.dpll.lock);
4255 
4256 	if (DISPLAY_VER(i915) >= 14 || IS_DG2(i915))
4257 		/* No shared DPLLs on DG2; port PLLs are part of the PHY */
4258 		dpll_mgr = NULL;
4259 	else if (IS_ALDERLAKE_P(i915))
4260 		dpll_mgr = &adlp_pll_mgr;
4261 	else if (IS_ALDERLAKE_S(i915))
4262 		dpll_mgr = &adls_pll_mgr;
4263 	else if (IS_DG1(i915))
4264 		dpll_mgr = &dg1_pll_mgr;
4265 	else if (IS_ROCKETLAKE(i915))
4266 		dpll_mgr = &rkl_pll_mgr;
4267 	else if (DISPLAY_VER(i915) >= 12)
4268 		dpll_mgr = &tgl_pll_mgr;
4269 	else if (IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915))
4270 		dpll_mgr = &ehl_pll_mgr;
4271 	else if (DISPLAY_VER(i915) >= 11)
4272 		dpll_mgr = &icl_pll_mgr;
4273 	else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
4274 		dpll_mgr = &bxt_pll_mgr;
4275 	else if (DISPLAY_VER(i915) == 9)
4276 		dpll_mgr = &skl_pll_mgr;
4277 	else if (HAS_DDI(i915))
4278 		dpll_mgr = &hsw_pll_mgr;
4279 	else if (HAS_PCH_IBX(i915) || HAS_PCH_CPT(i915))
4280 		dpll_mgr = &pch_pll_mgr;
4281 
4282 	if (!dpll_mgr)
4283 		return;
4284 
4285 	dpll_info = dpll_mgr->dpll_info;
4286 
4287 	for (i = 0; dpll_info[i].name; i++) {
4288 		if (drm_WARN_ON(&i915->drm,
4289 				i >= ARRAY_SIZE(i915->display.dpll.shared_dplls)))
4290 			break;
4291 
4292 		/* must fit into unsigned long bitmask on 32bit */
4293 		if (drm_WARN_ON(&i915->drm, dpll_info[i].id >= 32))
4294 			break;
4295 
4296 		i915->display.dpll.shared_dplls[i].info = &dpll_info[i];
4297 		i915->display.dpll.shared_dplls[i].index = i;
4298 	}
4299 
4300 	i915->display.dpll.mgr = dpll_mgr;
4301 	i915->display.dpll.num_shared_dpll = i;
4302 }
4303 
4304 /**
4305  * intel_compute_shared_dplls - compute DPLL state CRTC and encoder combination
4306  * @state: atomic state
4307  * @crtc: CRTC to compute DPLLs for
4308  * @encoder: encoder
4309  *
4310  * This function computes the DPLL state for the given CRTC and encoder.
4311  *
4312  * The new configuration in the atomic commit @state is made effective by
4313  * calling intel_shared_dpll_swap_state().
4314  *
4315  * Returns:
4316  * 0 on success, negative error code on falure.
4317  */
4318 int intel_compute_shared_dplls(struct intel_atomic_state *state,
4319 			       struct intel_crtc *crtc,
4320 			       struct intel_encoder *encoder)
4321 {
4322 	struct drm_i915_private *i915 = to_i915(state->base.dev);
4323 	const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4324 
4325 	if (drm_WARN_ON(&i915->drm, !dpll_mgr))
4326 		return -EINVAL;
4327 
4328 	return dpll_mgr->compute_dplls(state, crtc, encoder);
4329 }
4330 
4331 /**
4332  * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
4333  * @state: atomic state
4334  * @crtc: CRTC to reserve DPLLs for
4335  * @encoder: encoder
4336  *
4337  * This function reserves all required DPLLs for the given CRTC and encoder
4338  * combination in the current atomic commit @state and the new @crtc atomic
4339  * state.
4340  *
4341  * The new configuration in the atomic commit @state is made effective by
4342  * calling intel_shared_dpll_swap_state().
4343  *
4344  * The reserved DPLLs should be released by calling
4345  * intel_release_shared_dplls().
4346  *
4347  * Returns:
4348  * 0 if all required DPLLs were successfully reserved,
4349  * negative error code otherwise.
4350  */
4351 int intel_reserve_shared_dplls(struct intel_atomic_state *state,
4352 			       struct intel_crtc *crtc,
4353 			       struct intel_encoder *encoder)
4354 {
4355 	struct drm_i915_private *i915 = to_i915(state->base.dev);
4356 	const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4357 
4358 	if (drm_WARN_ON(&i915->drm, !dpll_mgr))
4359 		return -EINVAL;
4360 
4361 	return dpll_mgr->get_dplls(state, crtc, encoder);
4362 }
4363 
4364 /**
4365  * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
4366  * @state: atomic state
4367  * @crtc: crtc from which the DPLLs are to be released
4368  *
4369  * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
4370  * from the current atomic commit @state and the old @crtc atomic state.
4371  *
4372  * The new configuration in the atomic commit @state is made effective by
4373  * calling intel_shared_dpll_swap_state().
4374  */
4375 void intel_release_shared_dplls(struct intel_atomic_state *state,
4376 				struct intel_crtc *crtc)
4377 {
4378 	struct drm_i915_private *i915 = to_i915(state->base.dev);
4379 	const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4380 
4381 	/*
4382 	 * FIXME: this function is called for every platform having a
4383 	 * compute_clock hook, even though the platform doesn't yet support
4384 	 * the shared DPLL framework and intel_reserve_shared_dplls() is not
4385 	 * called on those.
4386 	 */
4387 	if (!dpll_mgr)
4388 		return;
4389 
4390 	dpll_mgr->put_dplls(state, crtc);
4391 }
4392 
4393 /**
4394  * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
4395  * @state: atomic state
4396  * @crtc: the CRTC for which to update the active DPLL
4397  * @encoder: encoder determining the type of port DPLL
4398  *
4399  * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
4400  * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
4401  * DPLL selected will be based on the current mode of the encoder's port.
4402  */
4403 void intel_update_active_dpll(struct intel_atomic_state *state,
4404 			      struct intel_crtc *crtc,
4405 			      struct intel_encoder *encoder)
4406 {
4407 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
4408 	const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4409 
4410 	if (drm_WARN_ON(&i915->drm, !dpll_mgr))
4411 		return;
4412 
4413 	dpll_mgr->update_active_dpll(state, crtc, encoder);
4414 }
4415 
4416 /**
4417  * intel_dpll_get_freq - calculate the DPLL's output frequency
4418  * @i915: i915 device
4419  * @pll: DPLL for which to calculate the output frequency
4420  * @pll_state: DPLL state from which to calculate the output frequency
4421  *
4422  * Return the output frequency corresponding to @pll's passed in @pll_state.
4423  */
4424 int intel_dpll_get_freq(struct drm_i915_private *i915,
4425 			const struct intel_shared_dpll *pll,
4426 			const struct intel_dpll_hw_state *pll_state)
4427 {
4428 	if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
4429 		return 0;
4430 
4431 	return pll->info->funcs->get_freq(i915, pll, pll_state);
4432 }
4433 
4434 /**
4435  * intel_dpll_get_hw_state - readout the DPLL's hardware state
4436  * @i915: i915 device
4437  * @pll: DPLL for which to calculate the output frequency
4438  * @hw_state: DPLL's hardware state
4439  *
4440  * Read out @pll's hardware state into @hw_state.
4441  */
4442 bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
4443 			     struct intel_shared_dpll *pll,
4444 			     struct intel_dpll_hw_state *hw_state)
4445 {
4446 	return pll->info->funcs->get_hw_state(i915, pll, hw_state);
4447 }
4448 
4449 static void readout_dpll_hw_state(struct drm_i915_private *i915,
4450 				  struct intel_shared_dpll *pll)
4451 {
4452 	struct intel_crtc *crtc;
4453 
4454 	pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state);
4455 
4456 	if (pll->on && pll->info->power_domain)
4457 		pll->wakeref = intel_display_power_get(i915, pll->info->power_domain);
4458 
4459 	pll->state.pipe_mask = 0;
4460 	for_each_intel_crtc(&i915->drm, crtc) {
4461 		struct intel_crtc_state *crtc_state =
4462 			to_intel_crtc_state(crtc->base.state);
4463 
4464 		if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
4465 			intel_reference_shared_dpll_crtc(crtc, pll, &pll->state);
4466 	}
4467 	pll->active_mask = pll->state.pipe_mask;
4468 
4469 	drm_dbg_kms(&i915->drm,
4470 		    "%s hw state readout: pipe_mask 0x%x, on %i\n",
4471 		    pll->info->name, pll->state.pipe_mask, pll->on);
4472 }
4473 
4474 void intel_dpll_update_ref_clks(struct drm_i915_private *i915)
4475 {
4476 	if (i915->display.dpll.mgr && i915->display.dpll.mgr->update_ref_clks)
4477 		i915->display.dpll.mgr->update_ref_clks(i915);
4478 }
4479 
4480 void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
4481 {
4482 	struct intel_shared_dpll *pll;
4483 	int i;
4484 
4485 	for_each_shared_dpll(i915, pll, i)
4486 		readout_dpll_hw_state(i915, pll);
4487 }
4488 
4489 static void sanitize_dpll_state(struct drm_i915_private *i915,
4490 				struct intel_shared_dpll *pll)
4491 {
4492 	if (!pll->on)
4493 		return;
4494 
4495 	adlp_cmtg_clock_gating_wa(i915, pll);
4496 
4497 	if (pll->active_mask)
4498 		return;
4499 
4500 	drm_dbg_kms(&i915->drm,
4501 		    "%s enabled but not in use, disabling\n",
4502 		    pll->info->name);
4503 
4504 	_intel_disable_shared_dpll(i915, pll);
4505 }
4506 
4507 void intel_dpll_sanitize_state(struct drm_i915_private *i915)
4508 {
4509 	struct intel_shared_dpll *pll;
4510 	int i;
4511 
4512 	for_each_shared_dpll(i915, pll, i)
4513 		sanitize_dpll_state(i915, pll);
4514 }
4515 
4516 /**
4517  * intel_dpll_dump_hw_state - write hw_state to dmesg
4518  * @i915: i915 drm device
4519  * @hw_state: hw state to be written to the log
4520  *
4521  * Write the relevant values in @hw_state to dmesg using drm_dbg_kms.
4522  */
4523 void intel_dpll_dump_hw_state(struct drm_i915_private *i915,
4524 			      const struct intel_dpll_hw_state *hw_state)
4525 {
4526 	if (i915->display.dpll.mgr) {
4527 		i915->display.dpll.mgr->dump_hw_state(i915, hw_state);
4528 	} else {
4529 		/* fallback for platforms that don't use the shared dpll
4530 		 * infrastructure
4531 		 */
4532 		ibx_dump_hw_state(i915, hw_state);
4533 	}
4534 }
4535 
4536 /**
4537  * intel_dpll_compare_hw_state - compare the two states
4538  * @i915: i915 drm device
4539  * @a: first DPLL hw state
4540  * @b: second DPLL hw state
4541  *
4542  * Compare DPLL hw states @a and @b.
4543  *
4544  * Returns: true if the states are equal, false if the differ
4545  */
4546 bool intel_dpll_compare_hw_state(struct drm_i915_private *i915,
4547 				 const struct intel_dpll_hw_state *a,
4548 				 const struct intel_dpll_hw_state *b)
4549 {
4550 	if (i915->display.dpll.mgr) {
4551 		return i915->display.dpll.mgr->compare_hw_state(a, b);
4552 	} else {
4553 		/* fallback for platforms that don't use the shared dpll
4554 		 * infrastructure
4555 		 */
4556 		return ibx_compare_hw_state(a, b);
4557 	}
4558 }
4559 
4560 static void
4561 verify_single_dpll_state(struct drm_i915_private *i915,
4562 			 struct intel_shared_dpll *pll,
4563 			 struct intel_crtc *crtc,
4564 			 const struct intel_crtc_state *new_crtc_state)
4565 {
4566 	struct intel_dpll_hw_state dpll_hw_state = {};
4567 	u8 pipe_mask;
4568 	bool active;
4569 
4570 	active = intel_dpll_get_hw_state(i915, pll, &dpll_hw_state);
4571 
4572 	if (!pll->info->always_on) {
4573 		I915_STATE_WARN(i915, !pll->on && pll->active_mask,
4574 				"%s: pll in active use but not on in sw tracking\n",
4575 				pll->info->name);
4576 		I915_STATE_WARN(i915, pll->on && !pll->active_mask,
4577 				"%s: pll is on but not used by any active pipe\n",
4578 				pll->info->name);
4579 		I915_STATE_WARN(i915, pll->on != active,
4580 				"%s: pll on state mismatch (expected %i, found %i)\n",
4581 				pll->info->name, pll->on, active);
4582 	}
4583 
4584 	if (!crtc) {
4585 		I915_STATE_WARN(i915,
4586 				pll->active_mask & ~pll->state.pipe_mask,
4587 				"%s: more active pll users than references: 0x%x vs 0x%x\n",
4588 				pll->info->name, pll->active_mask, pll->state.pipe_mask);
4589 
4590 		return;
4591 	}
4592 
4593 	pipe_mask = BIT(crtc->pipe);
4594 
4595 	if (new_crtc_state->hw.active)
4596 		I915_STATE_WARN(i915, !(pll->active_mask & pipe_mask),
4597 				"%s: pll active mismatch (expected pipe %c in active mask 0x%x)\n",
4598 				pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
4599 	else
4600 		I915_STATE_WARN(i915, pll->active_mask & pipe_mask,
4601 				"%s: pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
4602 				pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
4603 
4604 	I915_STATE_WARN(i915, !(pll->state.pipe_mask & pipe_mask),
4605 			"%s: pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
4606 			pll->info->name, pipe_mask, pll->state.pipe_mask);
4607 
4608 	I915_STATE_WARN(i915,
4609 			pll->on && memcmp(&pll->state.hw_state, &dpll_hw_state,
4610 					  sizeof(dpll_hw_state)),
4611 			"%s: pll hw state mismatch\n",
4612 			pll->info->name);
4613 }
4614 
4615 static bool has_alt_port_dpll(const struct intel_shared_dpll *old_pll,
4616 			      const struct intel_shared_dpll *new_pll)
4617 {
4618 	return old_pll && new_pll && old_pll != new_pll &&
4619 		(old_pll->info->is_alt_port_dpll || new_pll->info->is_alt_port_dpll);
4620 }
4621 
4622 void intel_shared_dpll_state_verify(struct intel_atomic_state *state,
4623 				    struct intel_crtc *crtc)
4624 {
4625 	struct drm_i915_private *i915 = to_i915(state->base.dev);
4626 	const struct intel_crtc_state *old_crtc_state =
4627 		intel_atomic_get_old_crtc_state(state, crtc);
4628 	const struct intel_crtc_state *new_crtc_state =
4629 		intel_atomic_get_new_crtc_state(state, crtc);
4630 
4631 	if (new_crtc_state->shared_dpll)
4632 		verify_single_dpll_state(i915, new_crtc_state->shared_dpll,
4633 					 crtc, new_crtc_state);
4634 
4635 	if (old_crtc_state->shared_dpll &&
4636 	    old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
4637 		u8 pipe_mask = BIT(crtc->pipe);
4638 		struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
4639 
4640 		I915_STATE_WARN(i915, pll->active_mask & pipe_mask,
4641 				"%s: pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
4642 				pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
4643 
4644 		/* TC ports have both MG/TC and TBT PLL referenced simultaneously */
4645 		I915_STATE_WARN(i915, !has_alt_port_dpll(old_crtc_state->shared_dpll,
4646 							 new_crtc_state->shared_dpll) &&
4647 				pll->state.pipe_mask & pipe_mask,
4648 				"%s: pll enabled crtcs mismatch (found pipe %c in enabled mask (0x%x))\n",
4649 				pll->info->name, pipe_name(crtc->pipe), pll->state.pipe_mask);
4650 	}
4651 }
4652 
4653 void intel_shared_dpll_verify_disabled(struct intel_atomic_state *state)
4654 {
4655 	struct drm_i915_private *i915 = to_i915(state->base.dev);
4656 	struct intel_shared_dpll *pll;
4657 	int i;
4658 
4659 	for_each_shared_dpll(i915, pll, i)
4660 		verify_single_dpll_state(i915, pll, NULL, NULL);
4661 }
4662