xref: /linux/drivers/gpu/drm/i915/display/intel_dpll_mgr.c (revision ab6a0edb7ded060e84dc1a24e3936c86c3d048b9)
1 /*
2  * Copyright © 2006-2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <linux/math.h>
25 #include <linux/string_helpers.h>
26 
27 #include "i915_reg.h"
28 #include "intel_de.h"
29 #include "intel_display_types.h"
30 #include "intel_dkl_phy.h"
31 #include "intel_dkl_phy_regs.h"
32 #include "intel_dpio_phy.h"
33 #include "intel_dpll.h"
34 #include "intel_dpll_mgr.h"
35 #include "intel_hti.h"
36 #include "intel_mg_phy_regs.h"
37 #include "intel_pch_refclk.h"
38 #include "intel_tc.h"
39 
40 /**
41  * DOC: Display PLLs
42  *
43  * Display PLLs used for driving outputs vary by platform. While some have
44  * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
45  * from a pool. In the latter scenario, it is possible that multiple pipes
46  * share a PLL if their configurations match.
47  *
48  * This file provides an abstraction over display PLLs. The function
49  * intel_shared_dpll_init() initializes the PLLs for the given platform.  The
50  * users of a PLL are tracked and that tracking is integrated with the atomic
51  * modeset interface. During an atomic operation, required PLLs can be reserved
52  * for a given CRTC and encoder configuration by calling
53  * intel_reserve_shared_dplls() and previously reserved PLLs can be released
54  * with intel_release_shared_dplls().
55  * Changes to the users are first staged in the atomic state, and then made
56  * effective by calling intel_shared_dpll_swap_state() during the atomic
57  * commit phase.
58  */
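
/*
 * A rough sketch of how the entry points documented above fit together
 * (arguments and error handling elided; see each function's kernel-doc
 * for the exact contract):
 *
 *   intel_shared_dpll_init()        - set up the per-platform PLL list
 *   intel_reserve_shared_dplls()    - atomic check phase, per CRTC/encoder
 *   intel_shared_dpll_swap_state()  - atomic commit phase
 *   intel_release_shared_dplls()    - when a CRTC stops using its PLL
 */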
59 
60 /* platform specific hooks for managing DPLLs */
61 struct intel_shared_dpll_funcs {
62 	/*
63 	 * Hook for enabling the pll, called from intel_enable_shared_dpll() if
64 	 * the pll is not already enabled.
65 	 */
66 	void (*enable)(struct drm_i915_private *i915,
67 		       struct intel_shared_dpll *pll);
68 
69 	/*
70 	 * Hook for disabling the pll, called from intel_disable_shared_dpll()
71 	 * only when it is safe to disable the pll, i.e., there are no more
72 	 * tracked users for it.
73 	 */
74 	void (*disable)(struct drm_i915_private *i915,
75 			struct intel_shared_dpll *pll);
76 
77 	/*
78 	 * Hook for reading the values currently programmed to the DPLL
79 	 * registers. This is used for initial hw state readout and state
80 	 * verification after a mode set.
81 	 */
82 	bool (*get_hw_state)(struct drm_i915_private *i915,
83 			     struct intel_shared_dpll *pll,
84 			     struct intel_dpll_hw_state *hw_state);
85 
86 	/*
87 	 * Hook for calculating the pll's output frequency based on its passed
88 	 * in state.
89 	 */
90 	int (*get_freq)(struct drm_i915_private *i915,
91 			const struct intel_shared_dpll *pll,
92 			const struct intel_dpll_hw_state *pll_state);
93 };
94 
95 struct intel_dpll_mgr {
96 	const struct dpll_info *dpll_info;
97 
98 	int (*compute_dplls)(struct intel_atomic_state *state,
99 			     struct intel_crtc *crtc,
100 			     struct intel_encoder *encoder);
101 	int (*get_dplls)(struct intel_atomic_state *state,
102 			 struct intel_crtc *crtc,
103 			 struct intel_encoder *encoder);
104 	void (*put_dplls)(struct intel_atomic_state *state,
105 			  struct intel_crtc *crtc);
106 	void (*update_active_dpll)(struct intel_atomic_state *state,
107 				   struct intel_crtc *crtc,
108 				   struct intel_encoder *encoder);
109 	void (*update_ref_clks)(struct drm_i915_private *i915);
110 	void (*dump_hw_state)(struct drm_printer *p,
111 			      const struct intel_dpll_hw_state *hw_state);
112 	bool (*compare_hw_state)(const struct intel_dpll_hw_state *a,
113 				 const struct intel_dpll_hw_state *b);
114 };
115 
116 static void
117 intel_atomic_duplicate_dpll_state(struct drm_i915_private *i915,
118 				  struct intel_shared_dpll_state *shared_dpll)
119 {
120 	struct intel_shared_dpll *pll;
121 	int i;
122 
123 	/* Copy shared dpll state */
124 	for_each_shared_dpll(i915, pll, i)
125 		shared_dpll[pll->index] = pll->state;
126 }
127 
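/*
 * Lazily duplicate the current per-PLL state into the atomic state the
 * first time it is needed; the caller must hold connection_mutex.
 */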
128 static struct intel_shared_dpll_state *
129 intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
130 {
131 	struct intel_atomic_state *state = to_intel_atomic_state(s);
132 
133 	drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
134 
135 	if (!state->dpll_set) {
136 		state->dpll_set = true;
137 
138 		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
139 						  state->shared_dpll);
140 	}
141 
142 	return state->shared_dpll;
143 }
144 
145 /**
146  * intel_get_shared_dpll_by_id - get a DPLL given its id
147  * @i915: i915 device instance
148  * @id: pll id
149  *
150  * Returns:
151  * A pointer to the DPLL with @id
152  */
153 struct intel_shared_dpll *
154 intel_get_shared_dpll_by_id(struct drm_i915_private *i915,
155 			    enum intel_dpll_id id)
156 {
157 	struct intel_shared_dpll *pll;
158 	int i;
159 
160 	for_each_shared_dpll(i915, pll, i) {
161 		if (pll->info->id == id)
162 			return pll;
163 	}
164 
165 	MISSING_CASE(id);
166 	return NULL;
167 }
168 
169 /* For ILK+ */
170 void assert_shared_dpll(struct drm_i915_private *i915,
171 			struct intel_shared_dpll *pll,
172 			bool state)
173 {
174 	bool cur_state;
175 	struct intel_dpll_hw_state hw_state;
176 
177 	if (drm_WARN(&i915->drm, !pll,
178 		     "asserting DPLL %s with no DPLL\n", str_on_off(state)))
179 		return;
180 
181 	cur_state = intel_dpll_get_hw_state(i915, pll, &hw_state);
182 	I915_STATE_WARN(i915, cur_state != state,
183 			"%s assertion failure (expected %s, current %s)\n",
184 			pll->info->name, str_on_off(state),
185 			str_on_off(cur_state));
186 }
187 
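/*
 * The MG/DKL PLL ids map 1:1 onto the Type-C ports:
 * DPLL_ID_ICL_MGPLL1 <-> TC_PORT_1, and so on.
 */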
188 static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
189 {
190 	return TC_PORT_1 + id - DPLL_ID_ICL_MGPLL1;
191 }
192 
193 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
194 {
195 	return tc_port - TC_PORT_1 + DPLL_ID_ICL_MGPLL1;
196 }
197 
198 static i915_reg_t
199 intel_combo_pll_enable_reg(struct drm_i915_private *i915,
200 			   struct intel_shared_dpll *pll)
201 {
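	/* Note: on JSL/EHL, DPLL4's enable bit lives in the MG PLL 1 enable register */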
202 	if (IS_DG1(i915))
203 		return DG1_DPLL_ENABLE(pll->info->id);
204 	else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
205 		 (pll->info->id == DPLL_ID_EHL_DPLL4))
206 		return MG_PLL_ENABLE(0);
207 
208 	return ICL_DPLL_ENABLE(pll->info->id);
209 }
210 
211 static i915_reg_t
212 intel_tc_pll_enable_reg(struct drm_i915_private *i915,
213 			struct intel_shared_dpll *pll)
214 {
215 	const enum intel_dpll_id id = pll->info->id;
216 	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
217 
218 	if (IS_ALDERLAKE_P(i915))
219 		return ADLP_PORTTC_PLL_ENABLE(tc_port);
220 
221 	return MG_PLL_ENABLE(tc_port);
222 }
223 
224 static void _intel_enable_shared_dpll(struct drm_i915_private *i915,
225 				      struct intel_shared_dpll *pll)
226 {
227 	if (pll->info->power_domain)
228 		pll->wakeref = intel_display_power_get(i915, pll->info->power_domain);
229 
230 	pll->info->funcs->enable(i915, pll);
231 	pll->on = true;
232 }
233 
234 static void _intel_disable_shared_dpll(struct drm_i915_private *i915,
235 				       struct intel_shared_dpll *pll)
236 {
237 	pll->info->funcs->disable(i915, pll);
238 	pll->on = false;
239 
240 	if (pll->info->power_domain)
241 		intel_display_power_put(i915, pll->info->power_domain, pll->wakeref);
242 }
243 
244 /**
245  * intel_enable_shared_dpll - enable a CRTC's shared DPLL
246  * @crtc_state: CRTC, and its state, which has a shared DPLL
247  *
248  * Enable the shared DPLL used by @crtc.
249  */
250 void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
251 {
252 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
253 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
254 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
255 	unsigned int pipe_mask = BIT(crtc->pipe);
256 	unsigned int old_mask;
257 
258 	if (drm_WARN_ON(&i915->drm, pll == NULL))
259 		return;
260 
261 	mutex_lock(&i915->display.dpll.lock);
262 	old_mask = pll->active_mask;
263 
264 	if (drm_WARN_ON(&i915->drm, !(pll->state.pipe_mask & pipe_mask)) ||
265 	    drm_WARN_ON(&i915->drm, pll->active_mask & pipe_mask))
266 		goto out;
267 
268 	pll->active_mask |= pipe_mask;
269 
270 	drm_dbg_kms(&i915->drm,
271 		    "enable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
272 		    pll->info->name, pll->active_mask, pll->on,
273 		    crtc->base.base.id, crtc->base.name);
274 
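	/* Already enabled for another pipe: just verify that the hardware agrees */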
275 	if (old_mask) {
276 		drm_WARN_ON(&i915->drm, !pll->on);
277 		assert_shared_dpll_enabled(i915, pll);
278 		goto out;
279 	}
280 	drm_WARN_ON(&i915->drm, pll->on);
281 
282 	drm_dbg_kms(&i915->drm, "enabling %s\n", pll->info->name);
283 
284 	_intel_enable_shared_dpll(i915, pll);
285 
286 out:
287 	mutex_unlock(&i915->display.dpll.lock);
288 }
289 
290 /**
291  * intel_disable_shared_dpll - disable a CRTC's shared DPLL
292  * @crtc_state: CRTC, and its state, which has a shared DPLL
293  *
294  * Disable the shared DPLL used by @crtc.
295  */
296 void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
297 {
298 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
299 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
300 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
301 	unsigned int pipe_mask = BIT(crtc->pipe);
302 
303 	/* PCH only available on ILK+ */
304 	if (DISPLAY_VER(i915) < 5)
305 		return;
306 
307 	if (pll == NULL)
308 		return;
309 
310 	mutex_lock(&i915->display.dpll.lock);
311 	if (drm_WARN(&i915->drm, !(pll->active_mask & pipe_mask),
312 		     "%s not used by [CRTC:%d:%s]\n", pll->info->name,
313 		     crtc->base.base.id, crtc->base.name))
314 		goto out;
315 
316 	drm_dbg_kms(&i915->drm,
317 		    "disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
318 		    pll->info->name, pll->active_mask, pll->on,
319 		    crtc->base.base.id, crtc->base.name);
320 
321 	assert_shared_dpll_enabled(i915, pll);
322 	drm_WARN_ON(&i915->drm, !pll->on);
323 
324 	pll->active_mask &= ~pipe_mask;
325 	if (pll->active_mask)
326 		goto out;
327 
328 	drm_dbg_kms(&i915->drm, "disabling %s\n", pll->info->name);
329 
330 	_intel_disable_shared_dpll(i915, pll);
331 
332 out:
333 	mutex_unlock(&i915->display.dpll.lock);
334 }
335 
336 static unsigned long
337 intel_dpll_mask_all(struct drm_i915_private *i915)
338 {
339 	struct intel_shared_dpll *pll;
340 	unsigned long dpll_mask = 0;
341 	int i;
342 
343 	for_each_shared_dpll(i915, pll, i) {
344 		drm_WARN_ON(&i915->drm, dpll_mask & BIT(pll->info->id));
345 
346 		dpll_mask |= BIT(pll->info->id);
347 	}
348 
349 	return dpll_mask;
350 }
351 
352 static struct intel_shared_dpll *
353 intel_find_shared_dpll(struct intel_atomic_state *state,
354 		       const struct intel_crtc *crtc,
355 		       const struct intel_dpll_hw_state *pll_state,
356 		       unsigned long dpll_mask)
357 {
358 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
359 	unsigned long dpll_mask_all = intel_dpll_mask_all(i915);
360 	struct intel_shared_dpll_state *shared_dpll;
361 	struct intel_shared_dpll *unused_pll = NULL;
362 	enum intel_dpll_id id;
363 
364 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
365 
366 	drm_WARN_ON(&i915->drm, dpll_mask & ~dpll_mask_all);
367 
368 	for_each_set_bit(id, &dpll_mask, fls(dpll_mask_all)) {
369 		struct intel_shared_dpll *pll;
370 
371 		pll = intel_get_shared_dpll_by_id(i915, id);
372 		if (!pll)
373 			continue;
374 
375 		/* Check in-use PLLs first; remember the first unused one as a fallback */
376 		if (shared_dpll[pll->index].pipe_mask == 0) {
377 			if (!unused_pll)
378 				unused_pll = pll;
379 			continue;
380 		}
381 
382 		if (memcmp(pll_state,
383 			   &shared_dpll[pll->index].hw_state,
384 			   sizeof(*pll_state)) == 0) {
385 			drm_dbg_kms(&i915->drm,
386 				    "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
387 				    crtc->base.base.id, crtc->base.name,
388 				    pll->info->name,
389 				    shared_dpll[pll->index].pipe_mask,
390 				    pll->active_mask);
391 			return pll;
392 		}
393 	}
394 
395 	/* Ok no matching timings, maybe there's a free one? */
396 	if (unused_pll) {
397 		drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] allocated %s\n",
398 			    crtc->base.base.id, crtc->base.name,
399 			    unused_pll->info->name);
400 		return unused_pll;
401 	}
402 
403 	return NULL;
404 }
405 
406 /**
407  * intel_reference_shared_dpll_crtc - Get a DPLL reference for a CRTC
408  * @crtc: CRTC on which behalf the reference is taken
409  * @pll: DPLL for which the reference is taken
410  * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
411  *
412  * Take a reference for @pll tracking the use of it by @crtc.
413  */
414 static void
415 intel_reference_shared_dpll_crtc(const struct intel_crtc *crtc,
416 				 const struct intel_shared_dpll *pll,
417 				 struct intel_shared_dpll_state *shared_dpll_state)
418 {
419 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
420 
421 	drm_WARN_ON(&i915->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) != 0);
422 
423 	shared_dpll_state->pipe_mask |= BIT(crtc->pipe);
424 
425 	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] reserving %s\n",
426 		    crtc->base.base.id, crtc->base.name, pll->info->name);
427 }
428 
429 static void
430 intel_reference_shared_dpll(struct intel_atomic_state *state,
431 			    const struct intel_crtc *crtc,
432 			    const struct intel_shared_dpll *pll,
433 			    const struct intel_dpll_hw_state *pll_state)
434 {
435 	struct intel_shared_dpll_state *shared_dpll;
436 
437 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
438 
439 	if (shared_dpll[pll->index].pipe_mask == 0)
440 		shared_dpll[pll->index].hw_state = *pll_state;
441 
442 	intel_reference_shared_dpll_crtc(crtc, pll, &shared_dpll[pll->index]);
443 }
444 
445 /**
446  * intel_unreference_shared_dpll_crtc - Drop a DPLL reference for a CRTC
447  * @crtc: CRTC on which behalf the reference is dropped
448  * @pll: DPLL for which the reference is dropped
449  * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
450  *
451  * Drop a reference for @pll tracking the end of use of it by @crtc.
452  */
453 void
454 intel_unreference_shared_dpll_crtc(const struct intel_crtc *crtc,
455 				   const struct intel_shared_dpll *pll,
456 				   struct intel_shared_dpll_state *shared_dpll_state)
457 {
458 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
459 
460 	drm_WARN_ON(&i915->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) == 0);
461 
462 	shared_dpll_state->pipe_mask &= ~BIT(crtc->pipe);
463 
464 	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] releasing %s\n",
465 		    crtc->base.base.id, crtc->base.name, pll->info->name);
466 }
467 
468 static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
469 					  const struct intel_crtc *crtc,
470 					  const struct intel_shared_dpll *pll)
471 {
472 	struct intel_shared_dpll_state *shared_dpll;
473 
474 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
475 
476 	intel_unreference_shared_dpll_crtc(crtc, pll, &shared_dpll[pll->index]);
477 }
478 
479 static void intel_put_dpll(struct intel_atomic_state *state,
480 			   struct intel_crtc *crtc)
481 {
482 	const struct intel_crtc_state *old_crtc_state =
483 		intel_atomic_get_old_crtc_state(state, crtc);
484 	struct intel_crtc_state *new_crtc_state =
485 		intel_atomic_get_new_crtc_state(state, crtc);
486 
487 	new_crtc_state->shared_dpll = NULL;
488 
489 	if (!old_crtc_state->shared_dpll)
490 		return;
491 
492 	intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
493 }
494 
495 /**
496  * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
497  * @state: atomic state
498  *
499  * This is the dpll version of drm_atomic_helper_swap_state() since the
500  * helper does not handle driver-specific global state.
501  *
502  * For consistency with atomic helpers this function does a complete swap,
503  * i.e. it also puts the current state into @state, even though there is no
504  * need for that at this moment.
505  */
506 void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
507 {
508 	struct drm_i915_private *i915 = to_i915(state->base.dev);
509 	struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
510 	struct intel_shared_dpll *pll;
511 	int i;
512 
513 	if (!state->dpll_set)
514 		return;
515 
516 	for_each_shared_dpll(i915, pll, i)
517 		swap(pll->state, shared_dpll[pll->index]);
518 }
519 
520 static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *i915,
521 				      struct intel_shared_dpll *pll,
522 				      struct intel_dpll_hw_state *hw_state)
523 {
524 	const enum intel_dpll_id id = pll->info->id;
525 	intel_wakeref_t wakeref;
526 	u32 val;
527 
528 	wakeref = intel_display_power_get_if_enabled(i915,
529 						     POWER_DOMAIN_DISPLAY_CORE);
530 	if (!wakeref)
531 		return false;
532 
533 	val = intel_de_read(i915, PCH_DPLL(id));
534 	hw_state->dpll = val;
535 	hw_state->fp0 = intel_de_read(i915, PCH_FP0(id));
536 	hw_state->fp1 = intel_de_read(i915, PCH_FP1(id));
537 
538 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
539 
540 	return val & DPLL_VCO_ENABLE;
541 }
542 
543 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *i915)
544 {
545 	u32 val;
546 	bool enabled;
547 
548 	val = intel_de_read(i915, PCH_DREF_CONTROL);
549 	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
550 			    DREF_SUPERSPREAD_SOURCE_MASK));
551 	I915_STATE_WARN(i915, !enabled,
552 			"PCH refclk assertion failure, should be active but is disabled\n");
553 }
554 
555 static void ibx_pch_dpll_enable(struct drm_i915_private *i915,
556 				struct intel_shared_dpll *pll)
557 {
558 	const enum intel_dpll_id id = pll->info->id;
559 
560 	/* PCH refclock must be enabled first */
561 	ibx_assert_pch_refclk_enabled(i915);
562 
563 	intel_de_write(i915, PCH_FP0(id), pll->state.hw_state.fp0);
564 	intel_de_write(i915, PCH_FP1(id), pll->state.hw_state.fp1);
565 
566 	intel_de_write(i915, PCH_DPLL(id), pll->state.hw_state.dpll);
567 
568 	/* Wait for the clocks to stabilize. */
569 	intel_de_posting_read(i915, PCH_DPLL(id));
570 	udelay(150);
571 
572 	/* The pixel multiplier can only be updated once the
573 	 * DPLL is enabled and the clocks are stable.
574 	 *
575 	 * So write it again.
576 	 */
577 	intel_de_write(i915, PCH_DPLL(id), pll->state.hw_state.dpll);
578 	intel_de_posting_read(i915, PCH_DPLL(id));
579 	udelay(200);
580 }
581 
582 static void ibx_pch_dpll_disable(struct drm_i915_private *i915,
583 				 struct intel_shared_dpll *pll)
584 {
585 	const enum intel_dpll_id id = pll->info->id;
586 
587 	intel_de_write(i915, PCH_DPLL(id), 0);
588 	intel_de_posting_read(i915, PCH_DPLL(id));
589 	udelay(200);
590 }
591 
592 static int ibx_compute_dpll(struct intel_atomic_state *state,
593 			    struct intel_crtc *crtc,
594 			    struct intel_encoder *encoder)
595 {
596 	return 0;
597 }
598 
599 static int ibx_get_dpll(struct intel_atomic_state *state,
600 			struct intel_crtc *crtc,
601 			struct intel_encoder *encoder)
602 {
603 	struct intel_crtc_state *crtc_state =
604 		intel_atomic_get_new_crtc_state(state, crtc);
605 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
606 	struct intel_shared_dpll *pll;
607 	enum intel_dpll_id id;
608 
609 	if (HAS_PCH_IBX(i915)) {
610 		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
611 		id = (enum intel_dpll_id) crtc->pipe;
612 		pll = intel_get_shared_dpll_by_id(i915, id);
613 
614 		drm_dbg_kms(&i915->drm,
615 			    "[CRTC:%d:%s] using pre-allocated %s\n",
616 			    crtc->base.base.id, crtc->base.name,
617 			    pll->info->name);
618 	} else {
619 		pll = intel_find_shared_dpll(state, crtc,
620 					     &crtc_state->dpll_hw_state,
621 					     BIT(DPLL_ID_PCH_PLL_B) |
622 					     BIT(DPLL_ID_PCH_PLL_A));
623 	}
624 
625 	if (!pll)
626 		return -EINVAL;
627 
628 	/* reference the pll */
629 	intel_reference_shared_dpll(state, crtc,
630 				    pll, &crtc_state->dpll_hw_state);
631 
632 	crtc_state->shared_dpll = pll;
633 
634 	return 0;
635 }
636 
637 static void ibx_dump_hw_state(struct drm_printer *p,
638 			      const struct intel_dpll_hw_state *hw_state)
639 {
640 	drm_printf(p, "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
641 		   "fp0: 0x%x, fp1: 0x%x\n",
642 		   hw_state->dpll,
643 		   hw_state->dpll_md,
644 		   hw_state->fp0,
645 		   hw_state->fp1);
646 }
647 
648 static bool ibx_compare_hw_state(const struct intel_dpll_hw_state *a,
649 				 const struct intel_dpll_hw_state *b)
650 {
651 	return a->dpll == b->dpll &&
652 		a->dpll_md == b->dpll_md &&
653 		a->fp0 == b->fp0 &&
654 		a->fp1 == b->fp1;
655 }
656 
657 static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
658 	.enable = ibx_pch_dpll_enable,
659 	.disable = ibx_pch_dpll_disable,
660 	.get_hw_state = ibx_pch_dpll_get_hw_state,
661 };
662 
663 static const struct dpll_info pch_plls[] = {
664 	{ .name = "PCH DPLL A", .funcs = &ibx_pch_dpll_funcs, .id = DPLL_ID_PCH_PLL_A, },
665 	{ .name = "PCH DPLL B", .funcs = &ibx_pch_dpll_funcs, .id = DPLL_ID_PCH_PLL_B, },
666 	{}
667 };
668 
669 static const struct intel_dpll_mgr pch_pll_mgr = {
670 	.dpll_info = pch_plls,
671 	.compute_dplls = ibx_compute_dpll,
672 	.get_dplls = ibx_get_dpll,
673 	.put_dplls = intel_put_dpll,
674 	.dump_hw_state = ibx_dump_hw_state,
675 	.compare_hw_state = ibx_compare_hw_state,
676 };
677 
678 static void hsw_ddi_wrpll_enable(struct drm_i915_private *i915,
679 				 struct intel_shared_dpll *pll)
680 {
681 	const enum intel_dpll_id id = pll->info->id;
682 
683 	intel_de_write(i915, WRPLL_CTL(id), pll->state.hw_state.wrpll);
684 	intel_de_posting_read(i915, WRPLL_CTL(id));
685 	udelay(20);
686 }
687 
688 static void hsw_ddi_spll_enable(struct drm_i915_private *i915,
689 				struct intel_shared_dpll *pll)
690 {
691 	intel_de_write(i915, SPLL_CTL, pll->state.hw_state.spll);
692 	intel_de_posting_read(i915, SPLL_CTL);
693 	udelay(20);
694 }
695 
696 static void hsw_ddi_wrpll_disable(struct drm_i915_private *i915,
697 				  struct intel_shared_dpll *pll)
698 {
699 	const enum intel_dpll_id id = pll->info->id;
700 
701 	intel_de_rmw(i915, WRPLL_CTL(id), WRPLL_PLL_ENABLE, 0);
702 	intel_de_posting_read(i915, WRPLL_CTL(id));
703 
704 	/*
705 	 * Try to set up the PCH reference clock once all DPLLs
706 	 * that depend on it have been shut down.
707 	 */
708 	if (i915->display.dpll.pch_ssc_use & BIT(id))
709 		intel_init_pch_refclk(i915);
710 }
711 
712 static void hsw_ddi_spll_disable(struct drm_i915_private *i915,
713 				 struct intel_shared_dpll *pll)
714 {
715 	enum intel_dpll_id id = pll->info->id;
716 
717 	intel_de_rmw(i915, SPLL_CTL, SPLL_PLL_ENABLE, 0);
718 	intel_de_posting_read(i915, SPLL_CTL);
719 
720 	/*
721 	 * Try to set up the PCH reference clock once all DPLLs
722 	 * that depend on it have been shut down.
723 	 */
724 	if (i915->display.dpll.pch_ssc_use & BIT(id))
725 		intel_init_pch_refclk(i915);
726 }
727 
728 static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *i915,
729 				       struct intel_shared_dpll *pll,
730 				       struct intel_dpll_hw_state *hw_state)
731 {
732 	const enum intel_dpll_id id = pll->info->id;
733 	intel_wakeref_t wakeref;
734 	u32 val;
735 
736 	wakeref = intel_display_power_get_if_enabled(i915,
737 						     POWER_DOMAIN_DISPLAY_CORE);
738 	if (!wakeref)
739 		return false;
740 
741 	val = intel_de_read(i915, WRPLL_CTL(id));
742 	hw_state->wrpll = val;
743 
744 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
745 
746 	return val & WRPLL_PLL_ENABLE;
747 }
748 
749 static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *i915,
750 				      struct intel_shared_dpll *pll,
751 				      struct intel_dpll_hw_state *hw_state)
752 {
753 	intel_wakeref_t wakeref;
754 	u32 val;
755 
756 	wakeref = intel_display_power_get_if_enabled(i915,
757 						     POWER_DOMAIN_DISPLAY_CORE);
758 	if (!wakeref)
759 		return false;
760 
761 	val = intel_de_read(i915, SPLL_CTL);
762 	hw_state->spll = val;
763 
764 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
765 
766 	return val & SPLL_PLL_ENABLE;
767 }
768 
769 #define LC_FREQ 2700
770 #define LC_FREQ_2K U64_C(LC_FREQ * 2000)
771 
772 #define P_MIN 2
773 #define P_MAX 64
774 #define P_INC 2
775 
776 /* Constraints for PLL good behavior */
777 #define REF_MIN 48
778 #define REF_MAX 400
779 #define VCO_MIN 2400
780 #define VCO_MAX 4800
781 
782 struct hsw_wrpll_rnp {
783 	unsigned p, n2, r2;
784 };
785 
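/*
 * Frequency error budget, in ppm, for the given pixel clock; a budget of 0
 * makes the search below simply pick the closest achievable frequency.
 */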
786 static unsigned hsw_wrpll_get_budget_for_freq(int clock)
787 {
788 	switch (clock) {
789 	case 25175000:
790 	case 25200000:
791 	case 27000000:
792 	case 27027000:
793 	case 37762500:
794 	case 37800000:
795 	case 40500000:
796 	case 40541000:
797 	case 54000000:
798 	case 54054000:
799 	case 59341000:
800 	case 59400000:
801 	case 72000000:
802 	case 74176000:
803 	case 74250000:
804 	case 81000000:
805 	case 81081000:
806 	case 89012000:
807 	case 89100000:
808 	case 108000000:
809 	case 108108000:
810 	case 111264000:
811 	case 111375000:
812 	case 148352000:
813 	case 148500000:
814 	case 162000000:
815 	case 162162000:
816 	case 222525000:
817 	case 222750000:
818 	case 296703000:
819 	case 297000000:
820 		return 0;
821 	case 233500000:
822 	case 245250000:
823 	case 247750000:
824 	case 253250000:
825 	case 298000000:
826 		return 1500;
827 	case 169128000:
828 	case 169500000:
829 	case 179500000:
830 	case 202000000:
831 		return 2000;
832 	case 256250000:
833 	case 262500000:
834 	case 270000000:
835 	case 272500000:
836 	case 273750000:
837 	case 280750000:
838 	case 281250000:
839 	case 286000000:
840 	case 291750000:
841 		return 4000;
842 	case 267250000:
843 	case 268500000:
844 		return 5000;
845 	default:
846 		return 1000;
847 	}
848 }
849 
850 static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
851 				 unsigned int r2, unsigned int n2,
852 				 unsigned int p,
853 				 struct hsw_wrpll_rnp *best)
854 {
855 	u64 a, b, c, d, diff, diff_best;
856 
857 	/* No best (r,n,p) yet */
858 	if (best->p == 0) {
859 		best->p = p;
860 		best->n2 = n2;
861 		best->r2 = r2;
862 		return;
863 	}
864 
865 	/*
866 	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
867 	 * freq2k.
868 	 *
869 	 * delta = 1e6 *
870 	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
871 	 *	   freq2k;
872 	 *
873 	 * and we would like delta <= budget.
874 	 *
875 	 * If the discrepancy is above the PPM-based budget, always prefer to
876 	 * improve upon the previous solution.  However, if you're within the
877 	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
878 	 */
879 	a = freq2k * budget * p * r2;
880 	b = freq2k * budget * best->p * best->r2;
881 	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
882 	diff_best = abs_diff(freq2k * best->p * best->r2,
883 			     LC_FREQ_2K * best->n2);
884 	c = 1000000 * diff;
885 	d = 1000000 * diff_best;
886 
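	/*
	 * a < c means the candidate (r2, n2, p) misses its ppm budget,
	 * b < d means the current best misses it as well.
	 */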
887 	if (a < c && b < d) {
888 		/* If both are above the budget, pick the closer */
889 		if (best->p * best->r2 * diff < p * r2 * diff_best) {
890 			best->p = p;
891 			best->n2 = n2;
892 			best->r2 = r2;
893 		}
894 	} else if (a >= c && b < d) {
895 		/* The candidate fits the budget but the current best does not: update */
896 		best->p = p;
897 		best->n2 = n2;
898 		best->r2 = r2;
899 	} else if (a >= c && b >= d) {
900 		/* Both are below the limit, so pick the higher n2/(r2*r2) */
901 		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
902 			best->p = p;
903 			best->n2 = n2;
904 			best->r2 = r2;
905 		}
906 	}
907 	/* Otherwise a < c && b >= d, do nothing */
908 }
909 
910 static void
911 hsw_ddi_calculate_wrpll(int clock /* in Hz */,
912 			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
913 {
914 	u64 freq2k;
915 	unsigned p, n2, r2;
916 	struct hsw_wrpll_rnp best = {};
917 	unsigned budget;
918 
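	/* clock is in Hz; freq2k is the same clock expressed in 100 Hz units */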
919 	freq2k = clock / 100;
920 
921 	budget = hsw_wrpll_get_budget_for_freq(clock);
922 
923 	/* Special case handling for a 540 MHz pixel clock: bypass the WR PLL
924 	 * entirely and pass the LC PLL clock straight through. */
925 	if (freq2k == 5400000) {
926 		*n2_out = 2;
927 		*p_out = 1;
928 		*r2_out = 2;
929 		return;
930 	}
931 
932 	/*
933 	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
934 	 * the WR PLL.
935 	 *
936 	 * We want R so that REF_MIN <= Ref <= REF_MAX.
937 	 * Injecting R2 = 2 * R gives:
938 	 *   REF_MAX * r2 > LC_FREQ * 2 and
939 	 *   REF_MIN * r2 < LC_FREQ * 2
940 	 *
941 	 * Which means the desired boundaries for r2 are:
942 	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
943 	 *
944 	 */
945 	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
946 	     r2 <= LC_FREQ * 2 / REF_MIN;
947 	     r2++) {
948 
949 		/*
950 		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
951 		 *
952 		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
953 		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
954 		 *   VCO_MAX * r2 > n2 * LC_FREQ and
955 		 *   VCO_MIN * r2 < n2 * LC_FREQ)
956 		 *
957 		 * Which means the desired boundaries for n2 are:
958 		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
959 		 */
960 		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
961 		     n2 <= VCO_MAX * r2 / LC_FREQ;
962 		     n2++) {
963 
964 			for (p = P_MIN; p <= P_MAX; p += P_INC)
965 				hsw_wrpll_update_rnp(freq2k, budget,
966 						     r2, n2, p, &best);
967 		}
968 	}
969 
970 	*n2_out = best.n2;
971 	*p_out = best.p;
972 	*r2_out = best.r2;
973 }
974 
975 static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *i915,
976 				  const struct intel_shared_dpll *pll,
977 				  const struct intel_dpll_hw_state *pll_state)
978 {
979 	int refclk;
980 	int n, p, r;
981 	u32 wrpll = pll_state->wrpll;
982 
983 	switch (wrpll & WRPLL_REF_MASK) {
984 	case WRPLL_REF_SPECIAL_HSW:
985 		/* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
986 		if (IS_HASWELL(i915) && !IS_HASWELL_ULT(i915)) {
987 			refclk = i915->display.dpll.ref_clks.nssc;
988 			break;
989 		}
990 		fallthrough;
991 	case WRPLL_REF_PCH_SSC:
992 		/*
993 		 * We could calculate spread here, but our checking
994 		 * code only cares about 5% accuracy, and spread is a max of
995 		 * 0.5% downspread.
996 		 */
997 		refclk = i915->display.dpll.ref_clks.ssc;
998 		break;
999 	case WRPLL_REF_LCPLL:
1000 		refclk = 2700000;
1001 		break;
1002 	default:
1003 		MISSING_CASE(wrpll);
1004 		return 0;
1005 	}
1006 
1007 	r = wrpll & WRPLL_DIVIDER_REF_MASK;
1008 	p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
1009 	n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;
1010 
1011 	/* Convert to KHz, p & r have a fixed point portion */
1012 	return (refclk * n / 10) / (p * r) * 2;
1013 }
1014 
1015 static int
1016 hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state,
1017 			   struct intel_crtc *crtc)
1018 {
1019 	struct drm_i915_private *i915 = to_i915(state->base.dev);
1020 	struct intel_crtc_state *crtc_state =
1021 		intel_atomic_get_new_crtc_state(state, crtc);
1022 	unsigned int p, n2, r2;
1023 
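	/* port_clock is in kHz; hsw_ddi_calculate_wrpll() expects Hz */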
1024 	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
1025 
1026 	crtc_state->dpll_hw_state.wrpll =
1027 		WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
1028 		WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
1029 		WRPLL_DIVIDER_POST(p);
1030 
1031 	crtc_state->port_clock = hsw_ddi_wrpll_get_freq(i915, NULL,
1032 							&crtc_state->dpll_hw_state);
1033 
1034 	return 0;
1035 }
1036 
1037 static struct intel_shared_dpll *
1038 hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
1039 		       struct intel_crtc *crtc)
1040 {
1041 	struct intel_crtc_state *crtc_state =
1042 		intel_atomic_get_new_crtc_state(state, crtc);
1043 
1044 	return intel_find_shared_dpll(state, crtc,
1045 				      &crtc_state->dpll_hw_state,
1046 				      BIT(DPLL_ID_WRPLL2) |
1047 				      BIT(DPLL_ID_WRPLL1));
1048 }
1049 
1050 static int
1051 hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state)
1052 {
1053 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1054 	int clock = crtc_state->port_clock;
1055 
1056 	switch (clock / 2) {
1057 	case 81000:
1058 	case 135000:
1059 	case 270000:
1060 		return 0;
1061 	default:
1062 		drm_dbg_kms(&i915->drm, "Invalid clock for DP: %d\n",
1063 			    clock);
1064 		return -EINVAL;
1065 	}
1066 }
1067 
1068 static struct intel_shared_dpll *
1069 hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
1070 {
1071 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1072 	struct intel_shared_dpll *pll;
1073 	enum intel_dpll_id pll_id;
1074 	int clock = crtc_state->port_clock;
1075 
1076 	switch (clock / 2) {
1077 	case 81000:
1078 		pll_id = DPLL_ID_LCPLL_810;
1079 		break;
1080 	case 135000:
1081 		pll_id = DPLL_ID_LCPLL_1350;
1082 		break;
1083 	case 270000:
1084 		pll_id = DPLL_ID_LCPLL_2700;
1085 		break;
1086 	default:
1087 		MISSING_CASE(clock / 2);
1088 		return NULL;
1089 	}
1090 
1091 	pll = intel_get_shared_dpll_by_id(i915, pll_id);
1092 
1093 	if (!pll)
1094 		return NULL;
1095 
1096 	return pll;
1097 }
1098 
1099 static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1100 				  const struct intel_shared_dpll *pll,
1101 				  const struct intel_dpll_hw_state *pll_state)
1102 {
1103 	int link_clock = 0;
1104 
1105 	switch (pll->info->id) {
1106 	case DPLL_ID_LCPLL_810:
1107 		link_clock = 81000;
1108 		break;
1109 	case DPLL_ID_LCPLL_1350:
1110 		link_clock = 135000;
1111 		break;
1112 	case DPLL_ID_LCPLL_2700:
1113 		link_clock = 270000;
1114 		break;
1115 	default:
1116 		drm_WARN(&i915->drm, 1, "bad port clock sel\n");
1117 		break;
1118 	}
1119 
1120 	return link_clock * 2;
1121 }
1122 
1123 static int
1124 hsw_ddi_spll_compute_dpll(struct intel_atomic_state *state,
1125 			  struct intel_crtc *crtc)
1126 {
1127 	struct intel_crtc_state *crtc_state =
1128 		intel_atomic_get_new_crtc_state(state, crtc);
1129 
1130 	if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
1131 		return -EINVAL;
1132 
1133 	crtc_state->dpll_hw_state.spll =
1134 		SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
1135 
1136 	return 0;
1137 }
1138 
1139 static struct intel_shared_dpll *
1140 hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
1141 		      struct intel_crtc *crtc)
1142 {
1143 	struct intel_crtc_state *crtc_state =
1144 		intel_atomic_get_new_crtc_state(state, crtc);
1145 
1146 	return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
1147 				      BIT(DPLL_ID_SPLL));
1148 }
1149 
1150 static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
1151 				 const struct intel_shared_dpll *pll,
1152 				 const struct intel_dpll_hw_state *pll_state)
1153 {
1154 	int link_clock = 0;
1155 
1156 	switch (pll_state->spll & SPLL_FREQ_MASK) {
1157 	case SPLL_FREQ_810MHz:
1158 		link_clock = 81000;
1159 		break;
1160 	case SPLL_FREQ_1350MHz:
1161 		link_clock = 135000;
1162 		break;
1163 	case SPLL_FREQ_2700MHz:
1164 		link_clock = 270000;
1165 		break;
1166 	default:
1167 		drm_WARN(&i915->drm, 1, "bad spll freq\n");
1168 		break;
1169 	}
1170 
1171 	return link_clock * 2;
1172 }
1173 
1174 static int hsw_compute_dpll(struct intel_atomic_state *state,
1175 			    struct intel_crtc *crtc,
1176 			    struct intel_encoder *encoder)
1177 {
1178 	struct intel_crtc_state *crtc_state =
1179 		intel_atomic_get_new_crtc_state(state, crtc);
1180 
1181 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1182 		return hsw_ddi_wrpll_compute_dpll(state, crtc);
1183 	else if (intel_crtc_has_dp_encoder(crtc_state))
1184 		return hsw_ddi_lcpll_compute_dpll(crtc_state);
1185 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1186 		return hsw_ddi_spll_compute_dpll(state, crtc);
1187 	else
1188 		return -EINVAL;
1189 }
1190 
1191 static int hsw_get_dpll(struct intel_atomic_state *state,
1192 			struct intel_crtc *crtc,
1193 			struct intel_encoder *encoder)
1194 {
1195 	struct intel_crtc_state *crtc_state =
1196 		intel_atomic_get_new_crtc_state(state, crtc);
1197 	struct intel_shared_dpll *pll = NULL;
1198 
1199 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1200 		pll = hsw_ddi_wrpll_get_dpll(state, crtc);
1201 	else if (intel_crtc_has_dp_encoder(crtc_state))
1202 		pll = hsw_ddi_lcpll_get_dpll(crtc_state);
1203 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1204 		pll = hsw_ddi_spll_get_dpll(state, crtc);
1205 
1206 	if (!pll)
1207 		return -EINVAL;
1208 
1209 	intel_reference_shared_dpll(state, crtc,
1210 				    pll, &crtc_state->dpll_hw_state);
1211 
1212 	crtc_state->shared_dpll = pll;
1213 
1214 	return 0;
1215 }
1216 
1217 static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
1218 {
1219 	i915->display.dpll.ref_clks.ssc = 135000;
1220 	/* Non-SSC is only used on non-ULT HSW. */
1221 	if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
1222 		i915->display.dpll.ref_clks.nssc = 24000;
1223 	else
1224 		i915->display.dpll.ref_clks.nssc = 135000;
1225 }
1226 
1227 static void hsw_dump_hw_state(struct drm_printer *p,
1228 			      const struct intel_dpll_hw_state *hw_state)
1229 {
1230 	drm_printf(p, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
1231 		   hw_state->wrpll, hw_state->spll);
1232 }
1233 
1234 static bool hsw_compare_hw_state(const struct intel_dpll_hw_state *a,
1235 				 const struct intel_dpll_hw_state *b)
1236 {
1237 	return a->wrpll == b->wrpll &&
1238 		a->spll == b->spll;
1239 }
1240 
1241 static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
1242 	.enable = hsw_ddi_wrpll_enable,
1243 	.disable = hsw_ddi_wrpll_disable,
1244 	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
1245 	.get_freq = hsw_ddi_wrpll_get_freq,
1246 };
1247 
1248 static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
1249 	.enable = hsw_ddi_spll_enable,
1250 	.disable = hsw_ddi_spll_disable,
1251 	.get_hw_state = hsw_ddi_spll_get_hw_state,
1252 	.get_freq = hsw_ddi_spll_get_freq,
1253 };
1254 
1255 static void hsw_ddi_lcpll_enable(struct drm_i915_private *i915,
1256 				 struct intel_shared_dpll *pll)
1257 {
1258 }
1259 
1260 static void hsw_ddi_lcpll_disable(struct drm_i915_private *i915,
1261 				  struct intel_shared_dpll *pll)
1262 {
1263 }
1264 
1265 static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *i915,
1266 				       struct intel_shared_dpll *pll,
1267 				       struct intel_dpll_hw_state *hw_state)
1268 {
1269 	return true;
1270 }
1271 
1272 static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
1273 	.enable = hsw_ddi_lcpll_enable,
1274 	.disable = hsw_ddi_lcpll_disable,
1275 	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
1276 	.get_freq = hsw_ddi_lcpll_get_freq,
1277 };
1278 
1279 static const struct dpll_info hsw_plls[] = {
1280 	{ .name = "WRPLL 1", .funcs = &hsw_ddi_wrpll_funcs, .id = DPLL_ID_WRPLL1, },
1281 	{ .name = "WRPLL 2", .funcs = &hsw_ddi_wrpll_funcs, .id = DPLL_ID_WRPLL2, },
1282 	{ .name = "SPLL", .funcs = &hsw_ddi_spll_funcs, .id = DPLL_ID_SPLL, },
1283 	{ .name = "LCPLL 810", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_810,
1284 	  .always_on = true, },
1285 	{ .name = "LCPLL 1350", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_1350,
1286 	  .always_on = true, },
1287 	{ .name = "LCPLL 2700", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_2700,
1288 	  .always_on = true, },
1289 	{}
1290 };
1291 
1292 static const struct intel_dpll_mgr hsw_pll_mgr = {
1293 	.dpll_info = hsw_plls,
1294 	.compute_dplls = hsw_compute_dpll,
1295 	.get_dplls = hsw_get_dpll,
1296 	.put_dplls = intel_put_dpll,
1297 	.update_ref_clks = hsw_update_dpll_ref_clks,
1298 	.dump_hw_state = hsw_dump_hw_state,
1299 	.compare_hw_state = hsw_compare_hw_state,
1300 };
1301 
1302 struct skl_dpll_regs {
1303 	i915_reg_t ctl, cfgcr1, cfgcr2;
1304 };
1305 
1306 /* this array is indexed by the *shared* pll id */
1307 static const struct skl_dpll_regs skl_dpll_regs[4] = {
1308 	{
1309 		/* DPLL 0 */
1310 		.ctl = LCPLL1_CTL,
1311 		/* DPLL 0 doesn't support HDMI mode */
1312 	},
1313 	{
1314 		/* DPLL 1 */
1315 		.ctl = LCPLL2_CTL,
1316 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
1317 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
1318 	},
1319 	{
1320 		/* DPLL 2 */
1321 		.ctl = WRPLL_CTL(0),
1322 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
1323 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
1324 	},
1325 	{
1326 		/* DPLL 3 */
1327 		.ctl = WRPLL_CTL(1),
1328 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
1329 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
1330 	},
1331 };
1332 
1333 static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *i915,
1334 				    struct intel_shared_dpll *pll)
1335 {
1336 	const enum intel_dpll_id id = pll->info->id;
1337 
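	/*
	 * Each PLL owns a 6-bit field in DPLL_CTRL1; hw_state.ctrl1 is kept
	 * at the DPLL 0 bit positions and shifted into place here.
	 */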
1338 	intel_de_rmw(i915, DPLL_CTRL1,
1339 		     DPLL_CTRL1_HDMI_MODE(id) | DPLL_CTRL1_SSC(id) | DPLL_CTRL1_LINK_RATE_MASK(id),
1340 		     pll->state.hw_state.ctrl1 << (id * 6));
1341 	intel_de_posting_read(i915, DPLL_CTRL1);
1342 }
1343 
1344 static void skl_ddi_pll_enable(struct drm_i915_private *i915,
1345 			       struct intel_shared_dpll *pll)
1346 {
1347 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1348 	const enum intel_dpll_id id = pll->info->id;
1349 
1350 	skl_ddi_pll_write_ctrl1(i915, pll);
1351 
1352 	intel_de_write(i915, regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
1353 	intel_de_write(i915, regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
1354 	intel_de_posting_read(i915, regs[id].cfgcr1);
1355 	intel_de_posting_read(i915, regs[id].cfgcr2);
1356 
1357 	/* the enable bit is always bit 31 */
1358 	intel_de_rmw(i915, regs[id].ctl, 0, LCPLL_PLL_ENABLE);
1359 
1360 	if (intel_de_wait_for_set(i915, DPLL_STATUS, DPLL_LOCK(id), 5))
1361 		drm_err(&i915->drm, "DPLL %d not locked\n", id);
1362 }
1363 
1364 static void skl_ddi_dpll0_enable(struct drm_i915_private *i915,
1365 				 struct intel_shared_dpll *pll)
1366 {
1367 	skl_ddi_pll_write_ctrl1(i915, pll);
1368 }
1369 
1370 static void skl_ddi_pll_disable(struct drm_i915_private *i915,
1371 				struct intel_shared_dpll *pll)
1372 {
1373 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1374 	const enum intel_dpll_id id = pll->info->id;
1375 
1376 	/* the enable bit is always bit 31 */
1377 	intel_de_rmw(i915, regs[id].ctl, LCPLL_PLL_ENABLE, 0);
1378 	intel_de_posting_read(i915, regs[id].ctl);
1379 }
1380 
1381 static void skl_ddi_dpll0_disable(struct drm_i915_private *i915,
1382 				  struct intel_shared_dpll *pll)
1383 {
1384 }
1385 
1386 static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *i915,
1387 				     struct intel_shared_dpll *pll,
1388 				     struct intel_dpll_hw_state *hw_state)
1389 {
1390 	u32 val;
1391 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1392 	const enum intel_dpll_id id = pll->info->id;
1393 	intel_wakeref_t wakeref;
1394 	bool ret;
1395 
1396 	wakeref = intel_display_power_get_if_enabled(i915,
1397 						     POWER_DOMAIN_DISPLAY_CORE);
1398 	if (!wakeref)
1399 		return false;
1400 
1401 	ret = false;
1402 
1403 	val = intel_de_read(i915, regs[id].ctl);
1404 	if (!(val & LCPLL_PLL_ENABLE))
1405 		goto out;
1406 
1407 	val = intel_de_read(i915, DPLL_CTRL1);
1408 	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1409 
1410 	/* avoid reading back stale values if HDMI mode is not enabled */
1411 	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
1412 		hw_state->cfgcr1 = intel_de_read(i915, regs[id].cfgcr1);
1413 		hw_state->cfgcr2 = intel_de_read(i915, regs[id].cfgcr2);
1414 	}
1415 	ret = true;
1416 
1417 out:
1418 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1419 
1420 	return ret;
1421 }
1422 
1423 static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *i915,
1424 				       struct intel_shared_dpll *pll,
1425 				       struct intel_dpll_hw_state *hw_state)
1426 {
1427 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1428 	const enum intel_dpll_id id = pll->info->id;
1429 	intel_wakeref_t wakeref;
1430 	u32 val;
1431 	bool ret;
1432 
1433 	wakeref = intel_display_power_get_if_enabled(i915,
1434 						     POWER_DOMAIN_DISPLAY_CORE);
1435 	if (!wakeref)
1436 		return false;
1437 
1438 	ret = false;
1439 
1440 	/* DPLL0 is always enabled since it drives CDCLK */
1441 	val = intel_de_read(i915, regs[id].ctl);
1442 	if (drm_WARN_ON(&i915->drm, !(val & LCPLL_PLL_ENABLE)))
1443 		goto out;
1444 
1445 	val = intel_de_read(i915, DPLL_CTRL1);
1446 	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1447 
1448 	ret = true;
1449 
1450 out:
1451 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1452 
1453 	return ret;
1454 }
1455 
1456 struct skl_wrpll_context {
1457 	u64 min_deviation;		/* current minimal deviation */
1458 	u64 central_freq;		/* chosen central freq */
1459 	u64 dco_freq;			/* chosen dco freq */
1460 	unsigned int p;			/* chosen divider */
1461 };
1462 
1463 /* DCO freq must be within +1%/-6% of the DCO central freq */
1464 #define SKL_DCO_MAX_PDEVIATION	100
1465 #define SKL_DCO_MAX_NDEVIATION	600
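/* the deviation below is computed in units of 0.01%: 100 = +1%, 600 = -6% */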
1466 
1467 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1468 				  u64 central_freq,
1469 				  u64 dco_freq,
1470 				  unsigned int divider)
1471 {
1472 	u64 deviation;
1473 
1474 	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1475 			      central_freq);
1476 
1477 	/* positive deviation */
1478 	if (dco_freq >= central_freq) {
1479 		if (deviation < SKL_DCO_MAX_PDEVIATION &&
1480 		    deviation < ctx->min_deviation) {
1481 			ctx->min_deviation = deviation;
1482 			ctx->central_freq = central_freq;
1483 			ctx->dco_freq = dco_freq;
1484 			ctx->p = divider;
1485 		}
1486 	/* negative deviation */
1487 	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1488 		   deviation < ctx->min_deviation) {
1489 		ctx->min_deviation = deviation;
1490 		ctx->central_freq = central_freq;
1491 		ctx->dco_freq = dco_freq;
1492 		ctx->p = divider;
1493 	}
1494 }
1495 
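/*
 * Split the overall divider p into the three hardware dividers,
 * p = p0 * p1 * p2 (p0 -> PDIV, p1 -> QDIV ratio, p2 -> KDIV).
 */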
1496 static void skl_wrpll_get_multipliers(unsigned int p,
1497 				      unsigned int *p0 /* out */,
1498 				      unsigned int *p1 /* out */,
1499 				      unsigned int *p2 /* out */)
1500 {
1501 	/* even dividers */
1502 	if (p % 2 == 0) {
1503 		unsigned int half = p / 2;
1504 
1505 		if (half == 1 || half == 2 || half == 3 || half == 5) {
1506 			*p0 = 2;
1507 			*p1 = 1;
1508 			*p2 = half;
1509 		} else if (half % 2 == 0) {
1510 			*p0 = 2;
1511 			*p1 = half / 2;
1512 			*p2 = 2;
1513 		} else if (half % 3 == 0) {
1514 			*p0 = 3;
1515 			*p1 = half / 3;
1516 			*p2 = 2;
1517 		} else if (half % 7 == 0) {
1518 			*p0 = 7;
1519 			*p1 = half / 7;
1520 			*p2 = 2;
1521 		}
1522 	} else if (p == 3 || p == 9) {  /* 3, 5, 7, 9, 15, 21, 35 */
1523 		*p0 = 3;
1524 		*p1 = 1;
1525 		*p2 = p / 3;
1526 	} else if (p == 5 || p == 7) {
1527 		*p0 = p;
1528 		*p1 = 1;
1529 		*p2 = 1;
1530 	} else if (p == 15) {
1531 		*p0 = 3;
1532 		*p1 = 1;
1533 		*p2 = 5;
1534 	} else if (p == 21) {
1535 		*p0 = 7;
1536 		*p1 = 1;
1537 		*p2 = 3;
1538 	} else if (p == 35) {
1539 		*p0 = 7;
1540 		*p1 = 1;
1541 		*p2 = 5;
1542 	}
1543 }
1544 
1545 struct skl_wrpll_params {
1546 	u32 dco_fraction;
1547 	u32 dco_integer;
1548 	u32 qdiv_ratio;
1549 	u32 qdiv_mode;
1550 	u32 kdiv;
1551 	u32 pdiv;
1552 	u32 central_freq;
1553 };
1554 
1555 static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
1556 				      u64 afe_clock,
1557 				      int ref_clock,
1558 				      u64 central_freq,
1559 				      u32 p0, u32 p1, u32 p2)
1560 {
1561 	u64 dco_freq;
1562 
1563 	switch (central_freq) {
1564 	case 9600000000ULL:
1565 		params->central_freq = 0;
1566 		break;
1567 	case 9000000000ULL:
1568 		params->central_freq = 1;
1569 		break;
1570 	case 8400000000ULL:
1571 		params->central_freq = 3;
1572 	}
1573 
1574 	switch (p0) {
1575 	case 1:
1576 		params->pdiv = 0;
1577 		break;
1578 	case 2:
1579 		params->pdiv = 1;
1580 		break;
1581 	case 3:
1582 		params->pdiv = 2;
1583 		break;
1584 	case 7:
1585 		params->pdiv = 4;
1586 		break;
1587 	default:
1588 		WARN(1, "Incorrect PDiv\n");
1589 	}
1590 
1591 	switch (p2) {
1592 	case 5:
1593 		params->kdiv = 0;
1594 		break;
1595 	case 2:
1596 		params->kdiv = 1;
1597 		break;
1598 	case 3:
1599 		params->kdiv = 2;
1600 		break;
1601 	case 1:
1602 		params->kdiv = 3;
1603 		break;
1604 	default:
1605 		WARN(1, "Incorrect KDiv\n");
1606 	}
1607 
1608 	params->qdiv_ratio = p1;
1609 	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
1610 
1611 	dco_freq = p0 * p1 * p2 * afe_clock;
1612 
1613 	/*
1614 	 * Intermediate values are in Hz.
1615 	 * Divide by MHz to match the bspec.
1616 	 */
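	/*
	 * Worked example (assuming a 24 MHz reference clock): a 297 MHz HDMI
	 * clock gives afe_clock = 1.485 GHz; with p = 6 the DCO runs at
	 * 8.91 GHz, i.e. 8.91 GHz / 24 MHz = 371.25, so dco_integer = 371
	 * and dco_fraction = 0.25 * 0x8000 = 0x2000.
	 */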
1617 	params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
1618 	params->dco_fraction =
1619 		div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
1620 			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
1621 }
1622 
1623 static int
1624 skl_ddi_calculate_wrpll(int clock /* in Hz */,
1625 			int ref_clock,
1626 			struct skl_wrpll_params *wrpll_params)
1627 {
1628 	static const u64 dco_central_freq[3] = { 8400000000ULL,
1629 						 9000000000ULL,
1630 						 9600000000ULL };
1631 	static const u8 even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
1632 					    24, 28, 30, 32, 36, 40, 42, 44,
1633 					    48, 52, 54, 56, 60, 64, 66, 68,
1634 					    70, 72, 76, 78, 80, 84, 88, 90,
1635 					    92, 96, 98 };
1636 	static const u8 odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
1637 	static const struct {
1638 		const u8 *list;
1639 		int n_dividers;
1640 	} dividers[] = {
1641 		{ even_dividers, ARRAY_SIZE(even_dividers) },
1642 		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
1643 	};
1644 	struct skl_wrpll_context ctx = {
1645 		.min_deviation = U64_MAX,
1646 	};
1647 	unsigned int dco, d, i;
1648 	unsigned int p0, p1, p2;
1649 	u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
1650 
1651 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
1652 		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
1653 			for (i = 0; i < dividers[d].n_dividers; i++) {
1654 				unsigned int p = dividers[d].list[i];
1655 				u64 dco_freq = p * afe_clock;
1656 
1657 				skl_wrpll_try_divider(&ctx,
1658 						      dco_central_freq[dco],
1659 						      dco_freq,
1660 						      p);
1661 				/*
1662 				 * Skip the remaining dividers once we've found
1663 				 * the definitive divider; a deviation of 0
1664 				 * cannot be improved upon.
1665 				 */
1666 				if (ctx.min_deviation == 0)
1667 					goto skip_remaining_dividers;
1668 			}
1669 		}
1670 
1671 skip_remaining_dividers:
1672 		/*
1673 		 * If a solution is found with an even divider, prefer
1674 		 * this one.
1675 		 */
1676 		if (d == 0 && ctx.p)
1677 			break;
1678 	}
1679 
1680 	if (!ctx.p)
1681 		return -EINVAL;
1682 
1683 	/*
1684 	 * gcc incorrectly analyses that these can be used without being
1685 	 * initialized. To be fair, it's hard to guess.
1686 	 */
1687 	p0 = p1 = p2 = 0;
1688 	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
1689 	skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
1690 				  ctx.central_freq, p0, p1, p2);
1691 
1692 	return 0;
1693 }
1694 
1695 static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
1696 				  const struct intel_shared_dpll *pll,
1697 				  const struct intel_dpll_hw_state *pll_state)
1698 {
1699 	int ref_clock = i915->display.dpll.ref_clks.nssc;
1700 	u32 p0, p1, p2, dco_freq;
1701 
1702 	p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
1703 	p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;
1704 
1705 	if (pll_state->cfgcr2 &  DPLL_CFGCR2_QDIV_MODE(1))
1706 		p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
1707 	else
1708 		p1 = 1;
1709 
1710 
1711 	switch (p0) {
1712 	case DPLL_CFGCR2_PDIV_1:
1713 		p0 = 1;
1714 		break;
1715 	case DPLL_CFGCR2_PDIV_2:
1716 		p0 = 2;
1717 		break;
1718 	case DPLL_CFGCR2_PDIV_3:
1719 		p0 = 3;
1720 		break;
1721 	case DPLL_CFGCR2_PDIV_7_INVALID:
1722 		/*
1723 		 * Incorrect ASUS-Z170M BIOS setting; the HW seems to ignore bit #0,
1724 		 * so handle it the same way as PDIV_7.
1725 		 */
1726 		drm_dbg_kms(&i915->drm, "Invalid WRPLL PDIV divider value, fixing it.\n");
1727 		fallthrough;
1728 	case DPLL_CFGCR2_PDIV_7:
1729 		p0 = 7;
1730 		break;
1731 	default:
1732 		MISSING_CASE(p0);
1733 		return 0;
1734 	}
1735 
1736 	switch (p2) {
1737 	case DPLL_CFGCR2_KDIV_5:
1738 		p2 = 5;
1739 		break;
1740 	case DPLL_CFGCR2_KDIV_2:
1741 		p2 = 2;
1742 		break;
1743 	case DPLL_CFGCR2_KDIV_3:
1744 		p2 = 3;
1745 		break;
1746 	case DPLL_CFGCR2_KDIV_1:
1747 		p2 = 1;
1748 		break;
1749 	default:
1750 		MISSING_CASE(p2);
1751 		return 0;
1752 	}
1753 
1754 	dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
1755 		   ref_clock;
1756 
1757 	dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
1758 		    ref_clock / 0x8000;
1759 
1760 	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
1761 		return 0;
1762 
1763 	return dco_freq / (p0 * p1 * p2 * 5);
1764 }
1765 
1766 static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
1767 {
1768 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1769 	struct skl_wrpll_params wrpll_params = {};
1770 	u32 ctrl1, cfgcr1, cfgcr2;
1771 	int ret;
1772 
1773 	/*
1774 	 * See comment in intel_dpll_hw_state to understand why we always use 0
1775 	 * as the DPLL id in this function.
1776 	 */
1777 	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1778 
1779 	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
1780 
1781 	ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
1782 				      i915->display.dpll.ref_clks.nssc, &wrpll_params);
1783 	if (ret)
1784 		return ret;
1785 
1786 	cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
1787 		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
1788 		wrpll_params.dco_integer;
1789 
1790 	cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
1791 		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
1792 		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
1793 		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
1794 		wrpll_params.central_freq;
1795 
1796 	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1797 	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
1798 	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
1799 
1800 	crtc_state->port_clock = skl_ddi_wrpll_get_freq(i915, NULL,
1801 							&crtc_state->dpll_hw_state);
1802 
1803 	return 0;
1804 }
1805 
1806 static int
1807 skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1808 {
1809 	u32 ctrl1;
1810 
1811 	/*
1812 	 * See comment in intel_dpll_hw_state to understand why we always use 0
1813 	 * as the DPLL id in this function.
1814 	 */
1815 	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1816 	switch (crtc_state->port_clock / 2) {
1817 	case 81000:
1818 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
1819 		break;
1820 	case 135000:
1821 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
1822 		break;
1823 	case 270000:
1824 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
1825 		break;
1826 		/* eDP 1.4 rates */
1827 	case 162000:
1828 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
1829 		break;
1830 	case 108000:
1831 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
1832 		break;
1833 	case 216000:
1834 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
1835 		break;
1836 	}
1837 
1838 	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1839 
1840 	return 0;
1841 }
1842 
1843 static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1844 				  const struct intel_shared_dpll *pll,
1845 				  const struct intel_dpll_hw_state *pll_state)
1846 {
1847 	int link_clock = 0;
1848 
1849 	switch ((pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
1850 		DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
1851 	case DPLL_CTRL1_LINK_RATE_810:
1852 		link_clock = 81000;
1853 		break;
1854 	case DPLL_CTRL1_LINK_RATE_1080:
1855 		link_clock = 108000;
1856 		break;
1857 	case DPLL_CTRL1_LINK_RATE_1350:
1858 		link_clock = 135000;
1859 		break;
1860 	case DPLL_CTRL1_LINK_RATE_1620:
1861 		link_clock = 162000;
1862 		break;
1863 	case DPLL_CTRL1_LINK_RATE_2160:
1864 		link_clock = 216000;
1865 		break;
1866 	case DPLL_CTRL1_LINK_RATE_2700:
1867 		link_clock = 270000;
1868 		break;
1869 	default:
1870 		drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
1871 		break;
1872 	}
1873 
1874 	return link_clock * 2;
1875 }
1876 
1877 static int skl_compute_dpll(struct intel_atomic_state *state,
1878 			    struct intel_crtc *crtc,
1879 			    struct intel_encoder *encoder)
1880 {
1881 	struct intel_crtc_state *crtc_state =
1882 		intel_atomic_get_new_crtc_state(state, crtc);
1883 
1884 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1885 		return skl_ddi_hdmi_pll_dividers(crtc_state);
1886 	else if (intel_crtc_has_dp_encoder(crtc_state))
1887 		return skl_ddi_dp_set_dpll_hw_state(crtc_state);
1888 	else
1889 		return -EINVAL;
1890 }
1891 
1892 static int skl_get_dpll(struct intel_atomic_state *state,
1893 			struct intel_crtc *crtc,
1894 			struct intel_encoder *encoder)
1895 {
1896 	struct intel_crtc_state *crtc_state =
1897 		intel_atomic_get_new_crtc_state(state, crtc);
1898 	struct intel_shared_dpll *pll;
1899 
1900 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1901 		pll = intel_find_shared_dpll(state, crtc,
1902 					     &crtc_state->dpll_hw_state,
1903 					     BIT(DPLL_ID_SKL_DPLL0));
1904 	else
1905 		pll = intel_find_shared_dpll(state, crtc,
1906 					     &crtc_state->dpll_hw_state,
1907 					     BIT(DPLL_ID_SKL_DPLL3) |
1908 					     BIT(DPLL_ID_SKL_DPLL2) |
1909 					     BIT(DPLL_ID_SKL_DPLL1));
1910 	if (!pll)
1911 		return -EINVAL;
1912 
1913 	intel_reference_shared_dpll(state, crtc,
1914 				    pll, &crtc_state->dpll_hw_state);
1915 
1916 	crtc_state->shared_dpll = pll;
1917 
1918 	return 0;
1919 }
1920 
1921 static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
1922 				const struct intel_shared_dpll *pll,
1923 				const struct intel_dpll_hw_state *pll_state)
1924 {
1925 	/*
1926 	 * ctrl1 register is already shifted for each pll, just use 0 to get
1927 	 * the internal shift for each field
1928 	 */
1929 	if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
1930 		return skl_ddi_wrpll_get_freq(i915, pll, pll_state);
1931 	else
1932 		return skl_ddi_lcpll_get_freq(i915, pll, pll_state);
1933 }
1934 
1935 static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
1936 {
1937 	/* No SSC ref */
1938 	i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
1939 }
1940 
1941 static void skl_dump_hw_state(struct drm_printer *p,
1942 			      const struct intel_dpll_hw_state *hw_state)
1943 {
1944 	drm_printf(p, "dpll_hw_state: ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
1945 		   hw_state->ctrl1, hw_state->cfgcr1, hw_state->cfgcr2);
1946 }
1947 
1948 static bool skl_compare_hw_state(const struct intel_dpll_hw_state *a,
1949 				 const struct intel_dpll_hw_state *b)
1950 {
1951 	return a->ctrl1 == b->ctrl1 &&
1952 		a->cfgcr1 == b->cfgcr1 &&
1953 		a->cfgcr2 == b->cfgcr2;
1954 }
1955 
1956 static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
1957 	.enable = skl_ddi_pll_enable,
1958 	.disable = skl_ddi_pll_disable,
1959 	.get_hw_state = skl_ddi_pll_get_hw_state,
1960 	.get_freq = skl_ddi_pll_get_freq,
1961 };
1962 
1963 static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
1964 	.enable = skl_ddi_dpll0_enable,
1965 	.disable = skl_ddi_dpll0_disable,
1966 	.get_hw_state = skl_ddi_dpll0_get_hw_state,
1967 	.get_freq = skl_ddi_pll_get_freq,
1968 };
1969 
1970 static const struct dpll_info skl_plls[] = {
1971 	{ .name = "DPLL 0", .funcs = &skl_ddi_dpll0_funcs, .id = DPLL_ID_SKL_DPLL0,
1972 	  .always_on = true, },
1973 	{ .name = "DPLL 1", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL1, },
1974 	{ .name = "DPLL 2", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL2, },
1975 	{ .name = "DPLL 3", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL3, },
1976 	{}
1977 };
1978 
1979 static const struct intel_dpll_mgr skl_pll_mgr = {
1980 	.dpll_info = skl_plls,
1981 	.compute_dplls = skl_compute_dpll,
1982 	.get_dplls = skl_get_dpll,
1983 	.put_dplls = intel_put_dpll,
1984 	.update_ref_clks = skl_update_dpll_ref_clks,
1985 	.dump_hw_state = skl_dump_hw_state,
1986 	.compare_hw_state = skl_compare_hw_state,
1987 };
1988 
1989 static void bxt_ddi_pll_enable(struct drm_i915_private *i915,
1990 			       struct intel_shared_dpll *pll)
1991 {
1992 	u32 temp;
1993 	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
1994 	enum dpio_phy phy;
1995 	enum dpio_channel ch;
1996 
1997 	bxt_port_to_phy_channel(i915, port, &phy, &ch);
1998 
1999 	/* Non-SSC reference */
2000 	intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_REF_SEL);
2001 
2002 	if (IS_GEMINILAKE(i915)) {
2003 		intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port),
2004 			     0, PORT_PLL_POWER_ENABLE);
2005 
2006 		if (wait_for_us((intel_de_read(i915, BXT_PORT_PLL_ENABLE(port)) &
2007 				 PORT_PLL_POWER_STATE), 200))
2008 			drm_err(&i915->drm,
2009 				"Power state not set for PLL:%d\n", port);
2010 	}
2011 
2012 	/* Disable 10 bit clock */
2013 	intel_de_rmw(i915, BXT_PORT_PLL_EBB_4(phy, ch),
2014 		     PORT_PLL_10BIT_CLK_ENABLE, 0);
2015 
2016 	/* Write P1 & P2 */
2017 	intel_de_rmw(i915, BXT_PORT_PLL_EBB_0(phy, ch),
2018 		     PORT_PLL_P1_MASK | PORT_PLL_P2_MASK, pll->state.hw_state.ebb0);
2019 
2020 	/* Write M2 integer */
2021 	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 0),
2022 		     PORT_PLL_M2_INT_MASK, pll->state.hw_state.pll0);
2023 
2024 	/* Write N */
2025 	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 1),
2026 		     PORT_PLL_N_MASK, pll->state.hw_state.pll1);
2027 
2028 	/* Write M2 fraction */
2029 	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 2),
2030 		     PORT_PLL_M2_FRAC_MASK, pll->state.hw_state.pll2);
2031 
2032 	/* Write M2 fraction enable */
2033 	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 3),
2034 		     PORT_PLL_M2_FRAC_ENABLE, pll->state.hw_state.pll3);
2035 
2036 	/* Write coeff */
2037 	temp = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 6));
2038 	temp &= ~PORT_PLL_PROP_COEFF_MASK;
2039 	temp &= ~PORT_PLL_INT_COEFF_MASK;
2040 	temp &= ~PORT_PLL_GAIN_CTL_MASK;
2041 	temp |= pll->state.hw_state.pll6;
2042 	intel_de_write(i915, BXT_PORT_PLL(phy, ch, 6), temp);
2043 
2044 	/* Write calibration val */
2045 	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 8),
2046 		     PORT_PLL_TARGET_CNT_MASK, pll->state.hw_state.pll8);
2047 
2048 	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 9),
2049 		     PORT_PLL_LOCK_THRESHOLD_MASK, pll->state.hw_state.pll9);
2050 
2051 	temp = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 10));
2052 	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
2053 	temp &= ~PORT_PLL_DCO_AMP_MASK;
2054 	temp |= pll->state.hw_state.pll10;
2055 	intel_de_write(i915, BXT_PORT_PLL(phy, ch, 10), temp);
2056 
2057 	/* Recalibrate with new settings */
2058 	temp = intel_de_read(i915, BXT_PORT_PLL_EBB_4(phy, ch));
2059 	temp |= PORT_PLL_RECALIBRATE;
2060 	intel_de_write(i915, BXT_PORT_PLL_EBB_4(phy, ch), temp);
2061 	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
2062 	temp |= pll->state.hw_state.ebb4;
2063 	intel_de_write(i915, BXT_PORT_PLL_EBB_4(phy, ch), temp);
2064 
2065 	/* Enable PLL */
2066 	intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_ENABLE);
2067 	intel_de_posting_read(i915, BXT_PORT_PLL_ENABLE(port));
2068 
2069 	if (wait_for_us((intel_de_read(i915, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
2070 			200))
2071 		drm_err(&i915->drm, "PLL %d not locked\n", port);
2072 
2073 	if (IS_GEMINILAKE(i915)) {
2074 		temp = intel_de_read(i915, BXT_PORT_TX_DW5_LN0(phy, ch));
2075 		temp |= DCC_DELAY_RANGE_2;
2076 		intel_de_write(i915, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
2077 	}
2078 
2079 	/*
2080 	 * While we write to the group register to program all lanes at once,
2081 	 * we can only read the lane registers, and we pick lanes 0/1 for that.
2082 	 */
2083 	temp = intel_de_read(i915, BXT_PORT_PCS_DW12_LN01(phy, ch));
2084 	temp &= ~LANE_STAGGER_MASK;
2085 	temp &= ~LANESTAGGER_STRAP_OVRD;
2086 	temp |= pll->state.hw_state.pcsdw12;
2087 	intel_de_write(i915, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
2088 }
2089 
2090 static void bxt_ddi_pll_disable(struct drm_i915_private *i915,
2091 				struct intel_shared_dpll *pll)
2092 {
2093 	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
2094 
2095 	intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), PORT_PLL_ENABLE, 0);
2096 	intel_de_posting_read(i915, BXT_PORT_PLL_ENABLE(port));
2097 
2098 	if (IS_GEMINILAKE(i915)) {
2099 		intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port),
2100 			     PORT_PLL_POWER_ENABLE, 0);
2101 
2102 		if (wait_for_us(!(intel_de_read(i915, BXT_PORT_PLL_ENABLE(port)) &
2103 				  PORT_PLL_POWER_STATE), 200))
2104 			drm_err(&i915->drm,
2105 				"Power state not reset for PLL:%d\n", port);
2106 	}
2107 }
2108 
2109 static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *i915,
2110 				     struct intel_shared_dpll *pll,
2111 				     struct intel_dpll_hw_state *hw_state)
2112 {
2113 	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
2114 	intel_wakeref_t wakeref;
2115 	enum dpio_phy phy;
2116 	enum dpio_channel ch;
2117 	u32 val;
2118 	bool ret;
2119 
2120 	bxt_port_to_phy_channel(i915, port, &phy, &ch);
2121 
2122 	wakeref = intel_display_power_get_if_enabled(i915,
2123 						     POWER_DOMAIN_DISPLAY_CORE);
2124 	if (!wakeref)
2125 		return false;
2126 
2127 	ret = false;
2128 
2129 	val = intel_de_read(i915, BXT_PORT_PLL_ENABLE(port));
2130 	if (!(val & PORT_PLL_ENABLE))
2131 		goto out;
2132 
2133 	hw_state->ebb0 = intel_de_read(i915, BXT_PORT_PLL_EBB_0(phy, ch));
2134 	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
2135 
2136 	hw_state->ebb4 = intel_de_read(i915, BXT_PORT_PLL_EBB_4(phy, ch));
2137 	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
2138 
2139 	hw_state->pll0 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 0));
2140 	hw_state->pll0 &= PORT_PLL_M2_INT_MASK;
2141 
2142 	hw_state->pll1 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 1));
2143 	hw_state->pll1 &= PORT_PLL_N_MASK;
2144 
2145 	hw_state->pll2 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 2));
2146 	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
2147 
2148 	hw_state->pll3 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 3));
2149 	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
2150 
2151 	hw_state->pll6 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 6));
2152 	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
2153 			  PORT_PLL_INT_COEFF_MASK |
2154 			  PORT_PLL_GAIN_CTL_MASK;
2155 
2156 	hw_state->pll8 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 8));
2157 	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
2158 
2159 	hw_state->pll9 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 9));
2160 	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
2161 
2162 	hw_state->pll10 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 10));
2163 	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
2164 			   PORT_PLL_DCO_AMP_MASK;
2165 
2166 	/*
2167 	 * While we write to the group register to program all lanes at once we
2168 	 * can read only lane registers. We configure all lanes the same way, so
2169 	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
2170 	 */
2171 	hw_state->pcsdw12 = intel_de_read(i915,
2172 					  BXT_PORT_PCS_DW12_LN01(phy, ch));
2173 	if (intel_de_read(i915, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
2174 		drm_dbg(&i915->drm,
2175 			"lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
2176 			hw_state->pcsdw12,
2177 			intel_de_read(i915,
2178 				      BXT_PORT_PCS_DW12_LN23(phy, ch)));
2179 	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
2180 
2181 	ret = true;
2182 
2183 out:
2184 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
2185 
2186 	return ret;
2187 }
2188 
2189 /* pre-calculated values for DP link rates */
2190 static const struct dpll bxt_dp_clk_val[] = {
2191 	/* m2 is .22 binary fixed point */
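	/* e.g. 0x819999a / 2^22 ~= 32.4 and 0x6c00000 / 2^22 = 27.0 (editor's note) */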
2192 	{ .dot = 162000, .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2193 	{ .dot = 270000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
2194 	{ .dot = 540000, .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
2195 	{ .dot = 216000, .p1 = 3, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2196 	{ .dot = 243000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6133333 /* 24.3 */ },
2197 	{ .dot = 324000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2198 	{ .dot = 432000, .p1 = 3, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2199 };
2200 
2201 static int
2202 bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
2203 			  struct dpll *clk_div)
2204 {
2205 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2206 
2207 	/* Calculate HDMI div */
2208 	/*
2209 	 * FIXME: tie the following calculation into
2210 	 * i9xx_crtc_compute_clock
2211 	 */
2212 	if (!bxt_find_best_dpll(crtc_state, clk_div))
2213 		return -EINVAL;
2214 
2215 	drm_WARN_ON(&i915->drm, clk_div->m1 != 2);
2216 
2217 	return 0;
2218 }
2219 
2220 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
2221 				    struct dpll *clk_div)
2222 {
2223 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2224 	int i;
2225 
2226 	*clk_div = bxt_dp_clk_val[0];
2227 	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
2228 		if (crtc_state->port_clock == bxt_dp_clk_val[i].dot) {
2229 			*clk_div = bxt_dp_clk_val[i];
2230 			break;
2231 		}
2232 	}
2233 
2234 	chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, clk_div);
2235 
2236 	drm_WARN_ON(&i915->drm, clk_div->vco == 0 ||
2237 		    clk_div->dot != crtc_state->port_clock);
2238 }
2239 
2240 static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
2241 				     const struct dpll *clk_div)
2242 {
2243 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2244 	struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
2245 	int clock = crtc_state->port_clock;
2246 	int vco = clk_div->vco;
2247 	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
2248 	u32 lanestagger;
2249 
2250 	if (vco >= 6200000 && vco <= 6700000) {
2251 		prop_coef = 4;
2252 		int_coef = 9;
2253 		gain_ctl = 3;
2254 		targ_cnt = 8;
2255 	} else if ((vco > 5400000 && vco < 6200000) ||
2256 			(vco >= 4800000 && vco < 5400000)) {
2257 		prop_coef = 5;
2258 		int_coef = 11;
2259 		gain_ctl = 3;
2260 		targ_cnt = 9;
2261 	} else if (vco == 5400000) {
2262 		prop_coef = 3;
2263 		int_coef = 8;
2264 		gain_ctl = 1;
2265 		targ_cnt = 9;
2266 	} else {
2267 		drm_err(&i915->drm, "Invalid VCO\n");
2268 		return -EINVAL;
2269 	}
2270 
2271 	if (clock > 270000)
2272 		lanestagger = 0x18;
2273 	else if (clock > 135000)
2274 		lanestagger = 0x0d;
2275 	else if (clock > 67000)
2276 		lanestagger = 0x07;
2277 	else if (clock > 33000)
2278 		lanestagger = 0x04;
2279 	else
2280 		lanestagger = 0x02;
2281 
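	/*
	 * Editor's illustration: m2 is a .22 binary fixed point value, so the
	 * high bits form the integer part (pll0) and the low 22 bits the
	 * fraction (pll2). For 32.4 (0x819999a), m2 >> 22 = 32 and
	 * m2 & 0x3fffff = 0x19999a ~= 0.4 * 2^22.
	 */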
2282 	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
2283 	dpll_hw_state->pll0 = PORT_PLL_M2_INT(clk_div->m2 >> 22);
2284 	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
2285 	dpll_hw_state->pll2 = PORT_PLL_M2_FRAC(clk_div->m2 & 0x3fffff);
2286 
2287 	if (clk_div->m2 & 0x3fffff)
2288 		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
2289 
2290 	dpll_hw_state->pll6 = PORT_PLL_PROP_COEFF(prop_coef) |
2291 		PORT_PLL_INT_COEFF(int_coef) |
2292 		PORT_PLL_GAIN_CTL(gain_ctl);
2293 
2294 	dpll_hw_state->pll8 = PORT_PLL_TARGET_CNT(targ_cnt);
2295 
2296 	dpll_hw_state->pll9 = PORT_PLL_LOCK_THRESHOLD(5);
2297 
2298 	dpll_hw_state->pll10 = PORT_PLL_DCO_AMP(15) |
2299 		PORT_PLL_DCO_AMP_OVR_EN_H;
2300 
2301 	dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
2302 
2303 	dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
2304 
2305 	return 0;
2306 }
2307 
2308 static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
2309 				const struct intel_shared_dpll *pll,
2310 				const struct intel_dpll_hw_state *pll_state)
2311 {
2312 	struct dpll clock;
2313 
2314 	clock.m1 = 2;
2315 	clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, pll_state->pll0) << 22;
2316 	if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
2317 		clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK, pll_state->pll2);
2318 	clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, pll_state->pll1);
2319 	clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, pll_state->ebb0);
2320 	clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, pll_state->ebb0);
2321 
2322 	return chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, &clock);
2323 }
2324 
2325 static int
2326 bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2327 {
2328 	struct dpll clk_div = {};
2329 
2330 	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
2331 
2332 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2333 }
2334 
2335 static int
2336 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2337 {
2338 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2339 	struct dpll clk_div = {};
2340 	int ret;
2341 
2342 	ret = bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
	if (ret)
		return ret;
2343 
2344 	ret = bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2345 	if (ret)
2346 		return ret;
2347 
2348 	crtc_state->port_clock = bxt_ddi_pll_get_freq(i915, NULL,
2349 						      &crtc_state->dpll_hw_state);
2350 
2351 	return 0;
2352 }
2353 
2354 static int bxt_compute_dpll(struct intel_atomic_state *state,
2355 			    struct intel_crtc *crtc,
2356 			    struct intel_encoder *encoder)
2357 {
2358 	struct intel_crtc_state *crtc_state =
2359 		intel_atomic_get_new_crtc_state(state, crtc);
2360 
2361 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
2362 		return bxt_ddi_hdmi_set_dpll_hw_state(crtc_state);
2363 	else if (intel_crtc_has_dp_encoder(crtc_state))
2364 		return bxt_ddi_dp_set_dpll_hw_state(crtc_state);
2365 	else
2366 		return -EINVAL;
2367 }
2368 
2369 static int bxt_get_dpll(struct intel_atomic_state *state,
2370 			struct intel_crtc *crtc,
2371 			struct intel_encoder *encoder)
2372 {
2373 	struct intel_crtc_state *crtc_state =
2374 		intel_atomic_get_new_crtc_state(state, crtc);
2375 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2376 	struct intel_shared_dpll *pll;
2377 	enum intel_dpll_id id;
2378 
2379 	/* 1:1 mapping between ports and PLLs */
2380 	id = (enum intel_dpll_id) encoder->port;
2381 	pll = intel_get_shared_dpll_by_id(i915, id);
2382 
2383 	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
2384 		    crtc->base.base.id, crtc->base.name, pll->info->name);
2385 
2386 	intel_reference_shared_dpll(state, crtc,
2387 				    pll, &crtc_state->dpll_hw_state);
2388 
2389 	crtc_state->shared_dpll = pll;
2390 
2391 	return 0;
2392 }
2393 
2394 static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
2395 {
2396 	i915->display.dpll.ref_clks.ssc = 100000;
2397 	i915->display.dpll.ref_clks.nssc = 100000;
2398 	/* DSI non-SSC ref 19.2MHz */
2399 }
2400 
2401 static void bxt_dump_hw_state(struct drm_printer *p,
2402 			      const struct intel_dpll_hw_state *hw_state)
2403 {
2404 	drm_printf(p, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x, "
2405 		   "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
2406 		   "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
2407 		   hw_state->ebb0, hw_state->ebb4,
2408 		   hw_state->pll0, hw_state->pll1, hw_state->pll2, hw_state->pll3,
2409 		   hw_state->pll6, hw_state->pll8, hw_state->pll9, hw_state->pll10,
2410 		   hw_state->pcsdw12);
2411 }
2412 
2413 static bool bxt_compare_hw_state(const struct intel_dpll_hw_state *a,
2414 				 const struct intel_dpll_hw_state *b)
2415 {
2416 	return a->ebb0 == b->ebb0 &&
2417 		a->ebb4 == b->ebb4 &&
2418 		a->pll0 == b->pll0 &&
2419 		a->pll1 == b->pll1 &&
2420 		a->pll2 == b->pll2 &&
2421 		a->pll3 == b->pll3 &&
2422 		a->pll6 == b->pll6 &&
2423 		a->pll8 == b->pll8 &&
		a->pll9 == b->pll9 &&
2424 		a->pll10 == b->pll10 &&
2425 		a->pcsdw12 == b->pcsdw12;
2426 }
2427 
2428 static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
2429 	.enable = bxt_ddi_pll_enable,
2430 	.disable = bxt_ddi_pll_disable,
2431 	.get_hw_state = bxt_ddi_pll_get_hw_state,
2432 	.get_freq = bxt_ddi_pll_get_freq,
2433 };
2434 
2435 static const struct dpll_info bxt_plls[] = {
2436 	{ .name = "PORT PLL A", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL0, },
2437 	{ .name = "PORT PLL B", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL1, },
2438 	{ .name = "PORT PLL C", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL2, },
2439 	{}
2440 };
2441 
2442 static const struct intel_dpll_mgr bxt_pll_mgr = {
2443 	.dpll_info = bxt_plls,
2444 	.compute_dplls = bxt_compute_dpll,
2445 	.get_dplls = bxt_get_dpll,
2446 	.put_dplls = intel_put_dpll,
2447 	.update_ref_clks = bxt_update_dpll_ref_clks,
2448 	.dump_hw_state = bxt_dump_hw_state,
2449 	.compare_hw_state = bxt_compare_hw_state,
2450 };
2451 
2452 static void icl_wrpll_get_multipliers(int bestdiv, int *pdiv,
2453 				      int *qdiv, int *kdiv)
2454 {
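	/*
	 * Decompose bestdiv into pdiv * qdiv * kdiv. Editor's examples:
	 * bestdiv = 20 (even, % 4 == 0) -> pdiv = 2, qdiv = 5, kdiv = 2;
	 * bestdiv = 15 (odd, not 3/5/7) -> pdiv = 5, qdiv = 1, kdiv = 3.
	 */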
2455 	/* even dividers */
2456 	if (bestdiv % 2 == 0) {
2457 		if (bestdiv == 2) {
2458 			*pdiv = 2;
2459 			*qdiv = 1;
2460 			*kdiv = 1;
2461 		} else if (bestdiv % 4 == 0) {
2462 			*pdiv = 2;
2463 			*qdiv = bestdiv / 4;
2464 			*kdiv = 2;
2465 		} else if (bestdiv % 6 == 0) {
2466 			*pdiv = 3;
2467 			*qdiv = bestdiv / 6;
2468 			*kdiv = 2;
2469 		} else if (bestdiv % 5 == 0) {
2470 			*pdiv = 5;
2471 			*qdiv = bestdiv / 10;
2472 			*kdiv = 2;
2473 		} else if (bestdiv % 14 == 0) {
2474 			*pdiv = 7;
2475 			*qdiv = bestdiv / 14;
2476 			*kdiv = 2;
2477 		}
2478 	} else {
2479 		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
2480 			*pdiv = bestdiv;
2481 			*qdiv = 1;
2482 			*kdiv = 1;
2483 		} else { /* 9, 15, 21 */
2484 			*pdiv = bestdiv / 3;
2485 			*qdiv = 1;
2486 			*kdiv = 3;
2487 		}
2488 	}
2489 }
2490 
2491 static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
2492 				      u32 dco_freq, u32 ref_freq,
2493 				      int pdiv, int qdiv, int kdiv)
2494 {
2495 	u32 dco;
2496 
2497 	switch (kdiv) {
2498 	case 1:
2499 		params->kdiv = 1;
2500 		break;
2501 	case 2:
2502 		params->kdiv = 2;
2503 		break;
2504 	case 3:
2505 		params->kdiv = 4;
2506 		break;
2507 	default:
2508 		WARN(1, "Incorrect KDiv\n");
2509 	}
2510 
2511 	switch (pdiv) {
2512 	case 2:
2513 		params->pdiv = 1;
2514 		break;
2515 	case 3:
2516 		params->pdiv = 2;
2517 		break;
2518 	case 5:
2519 		params->pdiv = 4;
2520 		break;
2521 	case 7:
2522 		params->pdiv = 8;
2523 		break;
2524 	default:
2525 		WARN(1, "Incorrect PDiv\n");
2526 	}
2527 
2528 	WARN_ON(kdiv != 2 && qdiv != 1);
2529 
2530 	params->qdiv_ratio = qdiv;
2531 	params->qdiv_mode = (qdiv == 1) ? 0 : 1;
2532 
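	/*
	 * Split dco_freq / ref_freq into a 15-bit fixed point value. Editor's
	 * example: an 8,100,000 kHz DCO with a 24,000 kHz reference gives
	 * 337.5, i.e. dco_integer = 0x151 and dco_fraction = 0x4000.
	 */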
2533 	dco = div_u64((u64)dco_freq << 15, ref_freq);
2534 
2535 	params->dco_integer = dco >> 15;
2536 	params->dco_fraction = dco & 0x7fff;
2537 }
2538 
2539 /*
2540  * Display WA #22010492432: ehl, tgl, adl-s, adl-p
2541  * Program half of the nominal DCO divider fraction value.
2542  */
2543 static bool
2544 ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
2545 {
2546 	return ((IS_ELKHARTLAKE(i915) &&
2547 		 IS_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
2548 		 IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) &&
2549 		 i915->display.dpll.ref_clks.nssc == 38400;
2550 }
2551 
2552 struct icl_combo_pll_params {
2553 	int clock;
2554 	struct skl_wrpll_params wrpll;
2555 };
2556 
2557 /*
2558  * These values are already adjusted: they're the bits we write to the
2559  * registers, not the logical values.
2560  */
2561 static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
2562 	{ 540000,
2563 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
2564 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2565 	{ 270000,
2566 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
2567 	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2568 	{ 162000,
2569 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
2570 	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2571 	{ 324000,
2572 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
2573 	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2574 	{ 216000,
2575 	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2576 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2577 	{ 432000,
2578 	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2579 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2580 	{ 648000,
2581 	  { .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
2582 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2583 	{ 810000,
2584 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
2585 	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2586 };
2587 
2588 
2589 /* Also used for 38.4 MHz values. */
2590 static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
2591 	{ 540000,
2592 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
2593 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2594 	{ 270000,
2595 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
2596 	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2597 	{ 162000,
2598 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
2599 	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2600 	{ 324000,
2601 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
2602 	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2603 	{ 216000,
2604 	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2605 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2606 	{ 432000,
2607 	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2608 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2609 	{ 648000,
2610 	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
2611 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2612 	{ 810000,
2613 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
2614 	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2615 };
2616 
2617 static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
2618 	.dco_integer = 0x151, .dco_fraction = 0x4000,
2619 	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2620 };
2621 
2622 static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
2623 	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
2624 	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2625 };
2626 
2627 static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
2628 	.dco_integer = 0x54, .dco_fraction = 0x3000,
2629 	/* the following params are unused */
2630 	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
2631 };
2632 
2633 static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
2634 	.dco_integer = 0x43, .dco_fraction = 0x4000,
2635 	/* the following params are unused */
2636 };
2637 
2638 static int icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2639 				 struct skl_wrpll_params *pll_params)
2640 {
2641 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2642 	const struct icl_combo_pll_params *params =
2643 		i915->display.dpll.ref_clks.nssc == 24000 ?
2644 		icl_dp_combo_pll_24MHz_values :
2645 		icl_dp_combo_pll_19_2MHz_values;
2646 	int clock = crtc_state->port_clock;
2647 	int i;
2648 
2649 	for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
2650 		if (clock == params[i].clock) {
2651 			*pll_params = params[i].wrpll;
2652 			return 0;
2653 		}
2654 	}
2655 
2656 	MISSING_CASE(clock);
2657 	return -EINVAL;
2658 }
2659 
2660 static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
2661 			    struct skl_wrpll_params *pll_params)
2662 {
2663 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2664 
2665 	if (DISPLAY_VER(i915) >= 12) {
2666 		switch (i915->display.dpll.ref_clks.nssc) {
2667 		default:
2668 			MISSING_CASE(i915->display.dpll.ref_clks.nssc);
2669 			fallthrough;
2670 		case 19200:
2671 		case 38400:
2672 			*pll_params = tgl_tbt_pll_19_2MHz_values;
2673 			break;
2674 		case 24000:
2675 			*pll_params = tgl_tbt_pll_24MHz_values;
2676 			break;
2677 		}
2678 	} else {
2679 		switch (i915->display.dpll.ref_clks.nssc) {
2680 		default:
2681 			MISSING_CASE(i915->display.dpll.ref_clks.nssc);
2682 			fallthrough;
2683 		case 19200:
2684 		case 38400:
2685 			*pll_params = icl_tbt_pll_19_2MHz_values;
2686 			break;
2687 		case 24000:
2688 			*pll_params = icl_tbt_pll_24MHz_values;
2689 			break;
2690 		}
2691 	}
2692 
2693 	return 0;
2694 }
2695 
2696 static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
2697 				    const struct intel_shared_dpll *pll,
2698 				    const struct intel_dpll_hw_state *pll_state)
2699 {
2700 	/*
2701 	 * The PLL outputs multiple frequencies at the same time; the selection
2702 	 * is made at the DDI clock mux level.
2703 	 */
2704 	drm_WARN_ON(&i915->drm, 1);
2705 
2706 	return 0;
2707 }
2708 
2709 static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
2710 {
2711 	int ref_clock = i915->display.dpll.ref_clks.nssc;
2712 
2713 	/*
2714 	 * For ICL+, the spec states: if reference frequency is 38.4,
2715 	 * use 19.2 because the DPLL automatically divides that by 2.
2716 	 */
2717 	if (ref_clock == 38400)
2718 		ref_clock = 19200;
2719 
2720 	return ref_clock;
2721 }
2722 
2723 static int
2724 icl_calc_wrpll(struct intel_crtc_state *crtc_state,
2725 	       struct skl_wrpll_params *wrpll_params)
2726 {
2727 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2728 	int ref_clock = icl_wrpll_ref_clock(i915);
2729 	u32 afe_clock = crtc_state->port_clock * 5;
2730 	u32 dco_min = 7998000;
2731 	u32 dco_max = 10000000;
2732 	u32 dco_mid = (dco_min + dco_max) / 2;
2733 	static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
2734 					 18, 20, 24, 28, 30, 32,  36,  40,
2735 					 42, 44, 48, 50, 52, 54,  56,  60,
2736 					 64, 66, 68, 70, 72, 76,  78,  80,
2737 					 84, 88, 90, 92, 96, 98, 100, 102,
2738 					  3,  5,  7,  9, 15, 21 };
2739 	u32 dco, best_dco = 0, dco_centrality = 0;
2740 	u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2741 	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2742 
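	/*
	 * Editor's example: for a 594 MHz HDMI port clock the AFE clock is
	 * 2,970,000 kHz; only divider 3 lands the DCO in range
	 * (2,970,000 * 3 = 8,910,000 kHz), so best_div = 3 and
	 * icl_wrpll_get_multipliers() yields pdiv = 3, qdiv = 1, kdiv = 1.
	 */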
2743 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2744 		dco = afe_clock * dividers[d];
2745 
2746 		if (dco <= dco_max && dco >= dco_min) {
2747 			dco_centrality = abs(dco - dco_mid);
2748 
2749 			if (dco_centrality < best_dco_centrality) {
2750 				best_dco_centrality = dco_centrality;
2751 				best_div = dividers[d];
2752 				best_dco = dco;
2753 			}
2754 		}
2755 	}
2756 
2757 	if (best_div == 0)
2758 		return -EINVAL;
2759 
2760 	icl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2761 	icl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
2762 				  pdiv, qdiv, kdiv);
2763 
2764 	return 0;
2765 }
2766 
2767 static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
2768 				      const struct intel_shared_dpll *pll,
2769 				      const struct intel_dpll_hw_state *pll_state)
2770 {
2771 	int ref_clock = icl_wrpll_ref_clock(i915);
2772 	u32 dco_fraction;
2773 	u32 p0, p1, p2, dco_freq;
2774 
2775 	p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
2776 	p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
2777 
2778 	if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
2779 		p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
2780 			DPLL_CFGCR1_QDIV_RATIO_SHIFT;
2781 	else
2782 		p1 = 1;
2783 
2784 	switch (p0) {
2785 	case DPLL_CFGCR1_PDIV_2:
2786 		p0 = 2;
2787 		break;
2788 	case DPLL_CFGCR1_PDIV_3:
2789 		p0 = 3;
2790 		break;
2791 	case DPLL_CFGCR1_PDIV_5:
2792 		p0 = 5;
2793 		break;
2794 	case DPLL_CFGCR1_PDIV_7:
2795 		p0 = 7;
2796 		break;
2797 	}
2798 
2799 	switch (p2) {
2800 	case DPLL_CFGCR1_KDIV_1:
2801 		p2 = 1;
2802 		break;
2803 	case DPLL_CFGCR1_KDIV_2:
2804 		p2 = 2;
2805 		break;
2806 	case DPLL_CFGCR1_KDIV_3:
2807 		p2 = 3;
2808 		break;
2809 	}
2810 
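	/*
	 * Editor's example: the 270000 (2.7 GHz) entry of the 19.2 MHz table
	 * has dco_integer = 0x1A5 (421) and dco_fraction = 0x7000, so
	 * dco_freq = 421 * 19200 + 0x7000 * 19200 / 0x8000 = 8,100,000 kHz,
	 * and with p0 = 3, p1 = 1, p2 = 2 this returns 8,100,000 / 30 = 270000.
	 */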
2811 	dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
2812 		   ref_clock;
2813 
2814 	dco_fraction = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
2815 		       DPLL_CFGCR0_DCO_FRACTION_SHIFT;
2816 
2817 	if (ehl_combo_pll_div_frac_wa_needed(i915))
2818 		dco_fraction *= 2;
2819 
2820 	dco_freq += (dco_fraction * ref_clock) / 0x8000;
2821 
2822 	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
2823 		return 0;
2824 
2825 	return dco_freq / (p0 * p1 * p2 * 5);
2826 }
2827 
2828 static void icl_calc_dpll_state(struct drm_i915_private *i915,
2829 				const struct skl_wrpll_params *pll_params,
2830 				struct intel_dpll_hw_state *pll_state)
2831 {
2832 	u32 dco_fraction = pll_params->dco_fraction;
2833 
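	/* e.g. a nominal fraction of 0x7000 is programmed as 0x3800 (editor's note) */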
2834 	if (ehl_combo_pll_div_frac_wa_needed(i915))
2835 		dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);
2836 
2837 	pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
2838 			    pll_params->dco_integer;
2839 
2840 	pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
2841 			    DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
2842 			    DPLL_CFGCR1_KDIV(pll_params->kdiv) |
2843 			    DPLL_CFGCR1_PDIV(pll_params->pdiv);
2844 
2845 	if (DISPLAY_VER(i915) >= 12)
2846 		pll_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
2847 	else
2848 		pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
2849 
2850 	if (i915->display.vbt.override_afc_startup)
2851 		pll_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->display.vbt.override_afc_startup_val);
2852 }
2853 
2854 static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
2855 				    u32 *target_dco_khz,
2856 				    struct intel_dpll_hw_state *state,
2857 				    bool is_dkl)
2858 {
2859 	static const u8 div1_vals[] = { 7, 5, 3, 2 };
2860 	u32 dco_min_freq, dco_max_freq;
2861 	unsigned int i;
2862 	int div2;
2863 
2864 	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
2865 	dco_max_freq = is_dp ? 8100000 : 10000000;
2866 
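	/*
	 * Editor's example: for DP at 270000 kHz the DCO must hit exactly
	 * 8,100,000 kHz, so div1 * div2 * 270000 * 5 = 8,100,000, i.e.
	 * div1 * div2 = 6; the loops settle on div1 = 3, div2 = 2.
	 */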
2867 	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
2868 		int div1 = div1_vals[i];
2869 
2870 		for (div2 = 10; div2 > 0; div2--) {
2871 			int dco = div1 * div2 * clock_khz * 5;
2872 			int a_divratio, tlinedrv, inputsel;
2873 			u32 hsdiv;
2874 
2875 			if (dco < dco_min_freq || dco > dco_max_freq)
2876 				continue;
2877 
2878 			if (div2 >= 2) {
2879 				/*
2880 				 * Note: a_divratio doesn't match the TGL BSpec
2881 				 * algorithm, but it matches the hardcoded values
2882 				 * and works on HW, at least for DP alt-mode.
2883 				 */
2884 				a_divratio = is_dp ? 10 : 5;
2885 				tlinedrv = is_dkl ? 1 : 2;
2886 			} else {
2887 				a_divratio = 5;
2888 				tlinedrv = 0;
2889 			}
2890 			inputsel = is_dp ? 0 : 1;
2891 
2892 			switch (div1) {
2893 			default:
2894 				MISSING_CASE(div1);
2895 				fallthrough;
2896 			case 2:
2897 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
2898 				break;
2899 			case 3:
2900 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
2901 				break;
2902 			case 5:
2903 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
2904 				break;
2905 			case 7:
2906 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
2907 				break;
2908 			}
2909 
2910 			*target_dco_khz = dco;
2911 
2912 			state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
2913 
2914 			state->mg_clktop2_coreclkctl1 =
2915 				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
2916 
2917 			state->mg_clktop2_hsclkctl =
2918 				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
2919 				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
2920 				hsdiv |
2921 				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
2922 
2923 			return 0;
2924 		}
2925 	}
2926 
2927 	return -EINVAL;
2928 }
2929 
2930 /*
2931  * The specification for this function uses real numbers, so the math had to be
2932  * adapted to integer-only calculation; that's why it looks so different.
2933  */
2934 static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
2935 				 struct intel_dpll_hw_state *pll_state)
2936 {
2937 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2938 	int refclk_khz = i915->display.dpll.ref_clks.nssc;
2939 	int clock = crtc_state->port_clock;
2940 	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
2941 	u32 iref_ndiv, iref_trim, iref_pulse_w;
2942 	u32 prop_coeff, int_coeff;
2943 	u32 tdc_targetcnt, feedfwgain;
2944 	u64 ssc_stepsize, ssc_steplen, ssc_steplog;
2945 	u64 tmp;
2946 	bool use_ssc = false;
2947 	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
2948 	bool is_dkl = DISPLAY_VER(i915) >= 12;
2949 	int ret;
2950 
2951 	ret = icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
2952 				       pll_state, is_dkl);
2953 	if (ret)
2954 		return ret;
2955 
2956 	m1div = 2;
2957 	m2div_int = dco_khz / (refclk_khz * m1div);
2958 	if (m2div_int > 255) {
2959 		if (!is_dkl) {
2960 			m1div = 4;
2961 			m2div_int = dco_khz / (refclk_khz * m1div);
2962 		}
2963 
2964 		if (m2div_int > 255)
2965 			return -EINVAL;
2966 	}
2967 	m2div_rem = dco_khz % (refclk_khz * m1div);
2968 
2969 	tmp = (u64)m2div_rem * (1 << 22);
2970 	do_div(tmp, refclk_khz * m1div);
2971 	m2div_frac = tmp;
2972 
2973 	switch (refclk_khz) {
2974 	case 19200:
2975 		iref_ndiv = 1;
2976 		iref_trim = 28;
2977 		iref_pulse_w = 1;
2978 		break;
2979 	case 24000:
2980 		iref_ndiv = 1;
2981 		iref_trim = 25;
2982 		iref_pulse_w = 2;
2983 		break;
2984 	case 38400:
2985 		iref_ndiv = 2;
2986 		iref_trim = 28;
2987 		iref_pulse_w = 1;
2988 		break;
2989 	default:
2990 		MISSING_CASE(refclk_khz);
2991 		return -EINVAL;
2992 	}
2993 
2994 	/*
2995 	 * tdc_res = 0.000003
2996 	 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
2997 	 *
2998 	 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
2999 	 * was supposed to be a division, but we rearranged the operations of
3000 	 * the formula to avoid early divisions so we don't multiply the
3001 	 * rounding errors.
3002 	 *
3003 	 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
3004 	 * we also rearrange to work with integers.
3005 	 *
3006 	 * The 0.5 transformed to 5 results in a multiplication by 10 and the
3007 	 * last division by 10.
3008 	 */
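	/*
	 * Editor's note: this evaluates to tdc_targetcnt = 79 for a 19.2 MHz
	 * refclk, 63 for 24 MHz and 39 for 38.4 MHz.
	 */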
3009 	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
3010 
3011 	/*
3012 	 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
3013 	 * 32 bits. That's not a problem since we round the division down
3014 	 * anyway.
3015 	 */
3016 	feedfwgain = (use_ssc || m2div_rem > 0) ?
3017 		m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
3018 
3019 	if (dco_khz >= 9000000) {
3020 		prop_coeff = 5;
3021 		int_coeff = 10;
3022 	} else {
3023 		prop_coeff = 4;
3024 		int_coeff = 8;
3025 	}
3026 
3027 	if (use_ssc) {
3028 		tmp = mul_u32_u32(dco_khz, 47 * 32);
3029 		do_div(tmp, refclk_khz * m1div * 10000);
3030 		ssc_stepsize = tmp;
3031 
3032 		tmp = mul_u32_u32(dco_khz, 1000);
3033 		ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
3034 	} else {
3035 		ssc_stepsize = 0;
3036 		ssc_steplen = 0;
3037 	}
3038 	ssc_steplog = 4;
3039 
3040 	/* write pll_state calculations */
3041 	if (is_dkl) {
3042 		pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
3043 					 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
3044 					 DKL_PLL_DIV0_FBPREDIV(m1div) |
3045 					 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
3046 		if (i915->display.vbt.override_afc_startup) {
3047 			u8 val = i915->display.vbt.override_afc_startup_val;
3048 
3049 			pll_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val);
3050 		}
3051 
3052 		pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
3053 					 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
3054 
3055 		pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
3056 					DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
3057 					DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
3058 					(use_ssc ? DKL_PLL_SSC_EN : 0);
3059 
3060 		pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
3061 					  DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
3062 
3063 		pll_state->mg_pll_tdc_coldst_bias =
3064 				DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
3065 				DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
3066 
3067 	} else {
3068 		pll_state->mg_pll_div0 =
3069 			(m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
3070 			MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
3071 			MG_PLL_DIV0_FBDIV_INT(m2div_int);
3072 
3073 		pll_state->mg_pll_div1 =
3074 			MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
3075 			MG_PLL_DIV1_DITHER_DIV_2 |
3076 			MG_PLL_DIV1_NDIVRATIO(1) |
3077 			MG_PLL_DIV1_FBPREDIV(m1div);
3078 
3079 		pll_state->mg_pll_lf =
3080 			MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
3081 			MG_PLL_LF_AFCCNTSEL_512 |
3082 			MG_PLL_LF_GAINCTRL(1) |
3083 			MG_PLL_LF_INT_COEFF(int_coeff) |
3084 			MG_PLL_LF_PROP_COEFF(prop_coeff);
3085 
3086 		pll_state->mg_pll_frac_lock =
3087 			MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
3088 			MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
3089 			MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
3090 			MG_PLL_FRAC_LOCK_DCODITHEREN |
3091 			MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
3092 		if (use_ssc || m2div_rem > 0)
3093 			pll_state->mg_pll_frac_lock |=
3094 				MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
3095 
3096 		pll_state->mg_pll_ssc =
3097 			(use_ssc ? MG_PLL_SSC_EN : 0) |
3098 			MG_PLL_SSC_TYPE(2) |
3099 			MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
3100 			MG_PLL_SSC_STEPNUM(ssc_steplog) |
3101 			MG_PLL_SSC_FLLEN |
3102 			MG_PLL_SSC_STEPSIZE(ssc_stepsize);
3103 
3104 		pll_state->mg_pll_tdc_coldst_bias =
3105 			MG_PLL_TDC_COLDST_COLDSTART |
3106 			MG_PLL_TDC_COLDST_IREFINT_EN |
3107 			MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
3108 			MG_PLL_TDC_TDCOVCCORR_EN |
3109 			MG_PLL_TDC_TDCSEL(3);
3110 
3111 		pll_state->mg_pll_bias =
3112 			MG_PLL_BIAS_BIAS_GB_SEL(3) |
3113 			MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
3114 			MG_PLL_BIAS_BIAS_BONUS(10) |
3115 			MG_PLL_BIAS_BIASCAL_EN |
3116 			MG_PLL_BIAS_CTRIM(12) |
3117 			MG_PLL_BIAS_VREF_RDAC(4) |
3118 			MG_PLL_BIAS_IREFTRIM(iref_trim);
3119 
3120 		if (refclk_khz == 38400) {
3121 			pll_state->mg_pll_tdc_coldst_bias_mask =
3122 				MG_PLL_TDC_COLDST_COLDSTART;
3123 			pll_state->mg_pll_bias_mask = 0;
3124 		} else {
3125 			pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
3126 			pll_state->mg_pll_bias_mask = -1U;
3127 		}
3128 
3129 		pll_state->mg_pll_tdc_coldst_bias &=
3130 			pll_state->mg_pll_tdc_coldst_bias_mask;
3131 		pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
3132 	}
3133 
3134 	return 0;
3135 }
3136 
3137 static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *i915,
3138 				   const struct intel_shared_dpll *pll,
3139 				   const struct intel_dpll_hw_state *pll_state)
3140 {
3141 	u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
3142 	u64 tmp;
3143 
3144 	ref_clock = i915->display.dpll.ref_clks.nssc;
3145 
3146 	if (DISPLAY_VER(i915) >= 12) {
3147 		m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
3148 		m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
3149 		m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
3150 
3151 		if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
3152 			m2_frac = pll_state->mg_pll_bias &
3153 				  DKL_PLL_BIAS_FBDIV_FRAC_MASK;
3154 			m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
3155 		} else {
3156 			m2_frac = 0;
3157 		}
3158 	} else {
3159 		m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
3160 		m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
3161 
3162 		if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
3163 			m2_frac = pll_state->mg_pll_div0 &
3164 				  MG_PLL_DIV0_FBDIV_FRAC_MASK;
3165 			m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
3166 		} else {
3167 			m2_frac = 0;
3168 		}
3169 	}
3170 
3171 	switch (pll_state->mg_clktop2_hsclkctl &
3172 		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
3173 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
3174 		div1 = 2;
3175 		break;
3176 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
3177 		div1 = 3;
3178 		break;
3179 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
3180 		div1 = 5;
3181 		break;
3182 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
3183 		div1 = 7;
3184 		break;
3185 	default:
3186 		MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
3187 		return 0;
3188 	}
3189 
3190 	div2 = (pll_state->mg_clktop2_hsclkctl &
3191 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
3192 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;
3193 
3194 	/* A div2 value of 0 is the same as 1, i.e. no divider */
3195 	if (div2 == 0)
3196 		div2 = 1;
3197 
3198 	/*
3199 	 * Adjust the original formula to delay the division by 2^22 in order to
3200 	 * minimize possible rounding errors.
3201 	 */
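	/*
	 * Editor's example: with ref = 38400, m1 = 2, m2_int = 105,
	 * m2_frac = 1966080, div1 = 3, div2 = 2 (a 270000 kHz DP link) this
	 * gives (2 * 105 * 38400) + ((2 * 1966080 * 38400) >> 22) =
	 * 8,100,000 kHz, and 8,100,000 / (5 * 3 * 2) = 270000.
	 */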
3202 	tmp = (u64)m1 * m2_int * ref_clock +
3203 	      (((u64)m1 * m2_frac * ref_clock) >> 22);
3204 	tmp = div_u64(tmp, 5 * div1 * div2);
3205 
3206 	return tmp;
3207 }
3208 
3209 /**
3210  * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
3211  * @crtc_state: state for the CRTC to select the DPLL for
3212  * @port_dpll_id: the active @port_dpll_id to select
3213  *
3214  * Select the given @port_dpll_id instance from the DPLLs reserved for the
3215  * CRTC.
3216  */
3217 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
3218 			      enum icl_port_dpll_id port_dpll_id)
3219 {
3220 	struct icl_port_dpll *port_dpll =
3221 		&crtc_state->icl_port_dplls[port_dpll_id];
3222 
3223 	crtc_state->shared_dpll = port_dpll->pll;
3224 	crtc_state->dpll_hw_state = port_dpll->hw_state;
3225 }
3226 
3227 static void icl_update_active_dpll(struct intel_atomic_state *state,
3228 				   struct intel_crtc *crtc,
3229 				   struct intel_encoder *encoder)
3230 {
3231 	struct intel_crtc_state *crtc_state =
3232 		intel_atomic_get_new_crtc_state(state, crtc);
3233 	struct intel_digital_port *primary_port;
3234 	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3235 
3236 	primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3237 		enc_to_mst(encoder)->primary :
3238 		enc_to_dig_port(encoder);
3239 
3240 	if (primary_port &&
3241 	    (intel_tc_port_in_dp_alt_mode(primary_port) ||
3242 	     intel_tc_port_in_legacy_mode(primary_port)))
3243 		port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3244 
3245 	icl_set_active_port_dpll(crtc_state, port_dpll_id);
3246 }
3247 
3248 static int icl_compute_combo_phy_dpll(struct intel_atomic_state *state,
3249 				      struct intel_crtc *crtc)
3250 {
3251 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
3252 	struct intel_crtc_state *crtc_state =
3253 		intel_atomic_get_new_crtc_state(state, crtc);
3254 	struct icl_port_dpll *port_dpll =
3255 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3256 	struct skl_wrpll_params pll_params = {};
3257 	int ret;
3258 
3259 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
3260 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
3261 		ret = icl_calc_wrpll(crtc_state, &pll_params);
3262 	else
3263 		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
3264 
3265 	if (ret)
3266 		return ret;
3267 
3268 	icl_calc_dpll_state(i915, &pll_params, &port_dpll->hw_state);
3269 
3270 	/* this is mainly for the fastset check */
3271 	icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
3272 
3273 	crtc_state->port_clock = icl_ddi_combo_pll_get_freq(i915, NULL,
3274 							    &port_dpll->hw_state);
3275 
3276 	return 0;
3277 }
3278 
3279 static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
3280 				  struct intel_crtc *crtc,
3281 				  struct intel_encoder *encoder)
3282 {
3283 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
3284 	struct intel_crtc_state *crtc_state =
3285 		intel_atomic_get_new_crtc_state(state, crtc);
3286 	struct icl_port_dpll *port_dpll =
3287 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3288 	enum port port = encoder->port;
3289 	unsigned long dpll_mask;
3290 
3291 	if (IS_ALDERLAKE_S(i915)) {
3292 		dpll_mask =
3293 			BIT(DPLL_ID_DG1_DPLL3) |
3294 			BIT(DPLL_ID_DG1_DPLL2) |
3295 			BIT(DPLL_ID_ICL_DPLL1) |
3296 			BIT(DPLL_ID_ICL_DPLL0);
3297 	} else if (IS_DG1(i915)) {
3298 		if (port == PORT_D || port == PORT_E) {
3299 			dpll_mask =
3300 				BIT(DPLL_ID_DG1_DPLL2) |
3301 				BIT(DPLL_ID_DG1_DPLL3);
3302 		} else {
3303 			dpll_mask =
3304 				BIT(DPLL_ID_DG1_DPLL0) |
3305 				BIT(DPLL_ID_DG1_DPLL1);
3306 		}
3307 	} else if (IS_ROCKETLAKE(i915)) {
3308 		dpll_mask =
3309 			BIT(DPLL_ID_EHL_DPLL4) |
3310 			BIT(DPLL_ID_ICL_DPLL1) |
3311 			BIT(DPLL_ID_ICL_DPLL0);
3312 	} else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
3313 		   port != PORT_A) {
3314 		dpll_mask =
3315 			BIT(DPLL_ID_EHL_DPLL4) |
3316 			BIT(DPLL_ID_ICL_DPLL1) |
3317 			BIT(DPLL_ID_ICL_DPLL0);
3318 	} else {
3319 		dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
3320 	}
3321 
3322 	/* Eliminate DPLLs from consideration if reserved by HTI */
3323 	dpll_mask &= ~intel_hti_dpll_mask(i915);
3324 
3325 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3326 						&port_dpll->hw_state,
3327 						dpll_mask);
3328 	if (!port_dpll->pll)
3329 		return -EINVAL;
3330 
3331 	intel_reference_shared_dpll(state, crtc,
3332 				    port_dpll->pll, &port_dpll->hw_state);
3333 
3334 	icl_update_active_dpll(state, crtc, encoder);
3335 
3336 	return 0;
3337 }
3338 
3339 static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
3340 				    struct intel_crtc *crtc)
3341 {
3342 	struct drm_i915_private *i915 = to_i915(state->base.dev);
3343 	struct intel_crtc_state *crtc_state =
3344 		intel_atomic_get_new_crtc_state(state, crtc);
3345 	const struct intel_crtc_state *old_crtc_state =
3346 		intel_atomic_get_old_crtc_state(state, crtc);
3347 	struct icl_port_dpll *port_dpll =
3348 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3349 	struct skl_wrpll_params pll_params = {};
3350 	int ret;
3351 
3352 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3353 	ret = icl_calc_tbt_pll(crtc_state, &pll_params);
3354 	if (ret)
3355 		return ret;
3356 
3357 	icl_calc_dpll_state(i915, &pll_params, &port_dpll->hw_state);
3358 
3359 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3360 	ret = icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state);
3361 	if (ret)
3362 		return ret;
3363 
3364 	/* this is mainly for the fastset check */
3365 	if (old_crtc_state->shared_dpll &&
3366 	    old_crtc_state->shared_dpll->info->id == DPLL_ID_ICL_TBTPLL)
3367 		icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
3368 	else
3369 		icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);
3370 
3371 	crtc_state->port_clock = icl_ddi_mg_pll_get_freq(i915, NULL,
3372 							 &port_dpll->hw_state);
3373 
3374 	return 0;
3375 }
3376 
3377 static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
3378 				struct intel_crtc *crtc,
3379 				struct intel_encoder *encoder)
3380 {
3381 	struct intel_crtc_state *crtc_state =
3382 		intel_atomic_get_new_crtc_state(state, crtc);
3383 	struct icl_port_dpll *port_dpll =
3384 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3385 	enum intel_dpll_id dpll_id;
3386 	int ret;
3387 
3388 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3389 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3390 						&port_dpll->hw_state,
3391 						BIT(DPLL_ID_ICL_TBTPLL));
3392 	if (!port_dpll->pll)
3393 		return -EINVAL;
3394 	intel_reference_shared_dpll(state, crtc,
3395 				    port_dpll->pll, &port_dpll->hw_state);
3396 
3397 
3398 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3399 	dpll_id = icl_tc_port_to_pll_id(intel_encoder_to_tc(encoder));
3400 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3401 						&port_dpll->hw_state,
3402 						BIT(dpll_id));
3403 	if (!port_dpll->pll) {
3404 		ret = -EINVAL;
3405 		goto err_unreference_tbt_pll;
3406 	}
3407 	intel_reference_shared_dpll(state, crtc,
3408 				    port_dpll->pll, &port_dpll->hw_state);
3409 
3410 	icl_update_active_dpll(state, crtc, encoder);
3411 
3412 	return 0;
3413 
3414 err_unreference_tbt_pll:
3415 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3416 	intel_unreference_shared_dpll(state, crtc, port_dpll->pll);
3417 
3418 	return ret;
3419 }
3420 
3421 static int icl_compute_dplls(struct intel_atomic_state *state,
3422 			     struct intel_crtc *crtc,
3423 			     struct intel_encoder *encoder)
3424 {
3425 	if (intel_encoder_is_combo(encoder))
3426 		return icl_compute_combo_phy_dpll(state, crtc);
3427 	else if (intel_encoder_is_tc(encoder))
3428 		return icl_compute_tc_phy_dplls(state, crtc);
3429 
3430 	MISSING_CASE(encoder->port);
3431 
3432 	return 0;
3433 }
3434 
3435 static int icl_get_dplls(struct intel_atomic_state *state,
3436 			 struct intel_crtc *crtc,
3437 			 struct intel_encoder *encoder)
3438 {
3439 	if (intel_encoder_is_combo(encoder))
3440 		return icl_get_combo_phy_dpll(state, crtc, encoder);
3441 	else if (intel_encoder_is_tc(encoder))
3442 		return icl_get_tc_phy_dplls(state, crtc, encoder);
3443 
3444 	MISSING_CASE(encoder->port);
3445 
3446 	return -EINVAL;
3447 }
3448 
3449 static void icl_put_dplls(struct intel_atomic_state *state,
3450 			  struct intel_crtc *crtc)
3451 {
3452 	const struct intel_crtc_state *old_crtc_state =
3453 		intel_atomic_get_old_crtc_state(state, crtc);
3454 	struct intel_crtc_state *new_crtc_state =
3455 		intel_atomic_get_new_crtc_state(state, crtc);
3456 	enum icl_port_dpll_id id;
3457 
3458 	new_crtc_state->shared_dpll = NULL;
3459 
3460 	for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3461 		const struct icl_port_dpll *old_port_dpll =
3462 			&old_crtc_state->icl_port_dplls[id];
3463 		struct icl_port_dpll *new_port_dpll =
3464 			&new_crtc_state->icl_port_dplls[id];
3465 
3466 		new_port_dpll->pll = NULL;
3467 
3468 		if (!old_port_dpll->pll)
3469 			continue;
3470 
3471 		intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3472 	}
3473 }
3474 
3475 static bool mg_pll_get_hw_state(struct drm_i915_private *i915,
3476 				struct intel_shared_dpll *pll,
3477 				struct intel_dpll_hw_state *hw_state)
3478 {
3479 	const enum intel_dpll_id id = pll->info->id;
3480 	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3481 	intel_wakeref_t wakeref;
3482 	bool ret = false;
3483 	u32 val;
3484 
3485 	i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll);
3486 
3487 	wakeref = intel_display_power_get_if_enabled(i915,
3488 						     POWER_DOMAIN_DISPLAY_CORE);
3489 	if (!wakeref)
3490 		return false;
3491 
3492 	val = intel_de_read(i915, enable_reg);
3493 	if (!(val & PLL_ENABLE))
3494 		goto out;
3495 
3496 	hw_state->mg_refclkin_ctl = intel_de_read(i915,
3497 						  MG_REFCLKIN_CTL(tc_port));
3498 	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3499 
3500 	hw_state->mg_clktop2_coreclkctl1 =
3501 		intel_de_read(i915, MG_CLKTOP2_CORECLKCTL1(tc_port));
3502 	hw_state->mg_clktop2_coreclkctl1 &=
3503 		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3504 
3505 	hw_state->mg_clktop2_hsclkctl =
3506 		intel_de_read(i915, MG_CLKTOP2_HSCLKCTL(tc_port));
3507 	hw_state->mg_clktop2_hsclkctl &=
3508 		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3509 		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3510 		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3511 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3512 
3513 	hw_state->mg_pll_div0 = intel_de_read(i915, MG_PLL_DIV0(tc_port));
3514 	hw_state->mg_pll_div1 = intel_de_read(i915, MG_PLL_DIV1(tc_port));
3515 	hw_state->mg_pll_lf = intel_de_read(i915, MG_PLL_LF(tc_port));
3516 	hw_state->mg_pll_frac_lock = intel_de_read(i915,
3517 						   MG_PLL_FRAC_LOCK(tc_port));
3518 	hw_state->mg_pll_ssc = intel_de_read(i915, MG_PLL_SSC(tc_port));
3519 
3520 	hw_state->mg_pll_bias = intel_de_read(i915, MG_PLL_BIAS(tc_port));
3521 	hw_state->mg_pll_tdc_coldst_bias =
3522 		intel_de_read(i915, MG_PLL_TDC_COLDST_BIAS(tc_port));
3523 
3524 	if (i915->display.dpll.ref_clks.nssc == 38400) {
3525 		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
3526 		hw_state->mg_pll_bias_mask = 0;
3527 	} else {
3528 		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
3529 		hw_state->mg_pll_bias_mask = -1U;
3530 	}
3531 
3532 	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
3533 	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
3534 
3535 	ret = true;
3536 out:
3537 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3538 	return ret;
3539 }
3540 
3541 static bool dkl_pll_get_hw_state(struct drm_i915_private *i915,
3542 				 struct intel_shared_dpll *pll,
3543 				 struct intel_dpll_hw_state *hw_state)
3544 {
3545 	const enum intel_dpll_id id = pll->info->id;
3546 	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3547 	intel_wakeref_t wakeref;
3548 	bool ret = false;
3549 	u32 val;
3550 
3551 	wakeref = intel_display_power_get_if_enabled(i915,
3552 						     POWER_DOMAIN_DISPLAY_CORE);
3553 	if (!wakeref)
3554 		return false;
3555 
3556 	val = intel_de_read(i915, intel_tc_pll_enable_reg(i915, pll));
3557 	if (!(val & PLL_ENABLE))
3558 		goto out;
3559 
3560 	/*
3561 	 * All registers read here have the same HIP_INDEX_REG even though
3562 	 * they are on different building blocks
3563 	 */
3564 	hw_state->mg_refclkin_ctl = intel_dkl_phy_read(i915,
3565 						       DKL_REFCLKIN_CTL(tc_port));
3566 	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3567 
3568 	hw_state->mg_clktop2_hsclkctl =
3569 		intel_dkl_phy_read(i915, DKL_CLKTOP2_HSCLKCTL(tc_port));
3570 	hw_state->mg_clktop2_hsclkctl &=
3571 		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3572 		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3573 		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3574 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3575 
3576 	hw_state->mg_clktop2_coreclkctl1 =
3577 		intel_dkl_phy_read(i915, DKL_CLKTOP2_CORECLKCTL1(tc_port));
3578 	hw_state->mg_clktop2_coreclkctl1 &=
3579 		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3580 
3581 	hw_state->mg_pll_div0 = intel_dkl_phy_read(i915, DKL_PLL_DIV0(tc_port));
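	/*
	 * Only track the AFC startup field when the VBT overrides it,
	 * mirroring the RMW mask used in dkl_pll_write().
	 */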
3582 	val = DKL_PLL_DIV0_MASK;
3583 	if (i915->display.vbt.override_afc_startup)
3584 		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
3585 	hw_state->mg_pll_div0 &= val;
3586 
3587 	hw_state->mg_pll_div1 = intel_dkl_phy_read(i915, DKL_PLL_DIV1(tc_port));
3588 	hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
3589 				  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
3590 
3591 	hw_state->mg_pll_ssc = intel_dkl_phy_read(i915, DKL_PLL_SSC(tc_port));
3592 	hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
3593 				 DKL_PLL_SSC_STEP_LEN_MASK |
3594 				 DKL_PLL_SSC_STEP_NUM_MASK |
3595 				 DKL_PLL_SSC_EN);
3596 
3597 	hw_state->mg_pll_bias = intel_dkl_phy_read(i915, DKL_PLL_BIAS(tc_port));
3598 	hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
3599 				  DKL_PLL_BIAS_FBDIV_FRAC_MASK);
3600 
3601 	hw_state->mg_pll_tdc_coldst_bias =
3602 		intel_dkl_phy_read(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3603 	hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
3604 					     DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
3605 
3606 	ret = true;
3607 out:
3608 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3609 	return ret;
3610 }
3611 
3612 static bool icl_pll_get_hw_state(struct drm_i915_private *i915,
3613 				 struct intel_shared_dpll *pll,
3614 				 struct intel_dpll_hw_state *hw_state,
3615 				 i915_reg_t enable_reg)
3616 {
3617 	const enum intel_dpll_id id = pll->info->id;
3618 	intel_wakeref_t wakeref;
3619 	bool ret = false;
3620 	u32 val;
3621 
3622 	wakeref = intel_display_power_get_if_enabled(i915,
3623 						     POWER_DOMAIN_DISPLAY_CORE);
3624 	if (!wakeref)
3625 		return false;
3626 
3627 	val = intel_de_read(i915, enable_reg);
3628 	if (!(val & PLL_ENABLE))
3629 		goto out;
3630 
3631 	if (IS_ALDERLAKE_S(i915)) {
3632 		hw_state->cfgcr0 = intel_de_read(i915, ADLS_DPLL_CFGCR0(id));
3633 		hw_state->cfgcr1 = intel_de_read(i915, ADLS_DPLL_CFGCR1(id));
3634 	} else if (IS_DG1(i915)) {
3635 		hw_state->cfgcr0 = intel_de_read(i915, DG1_DPLL_CFGCR0(id));
3636 		hw_state->cfgcr1 = intel_de_read(i915, DG1_DPLL_CFGCR1(id));
3637 	} else if (IS_ROCKETLAKE(i915)) {
3638 		hw_state->cfgcr0 = intel_de_read(i915,
3639 						 RKL_DPLL_CFGCR0(id));
3640 		hw_state->cfgcr1 = intel_de_read(i915,
3641 						 RKL_DPLL_CFGCR1(id));
3642 	} else if (DISPLAY_VER(i915) >= 12) {
3643 		hw_state->cfgcr0 = intel_de_read(i915,
3644 						 TGL_DPLL_CFGCR0(id));
3645 		hw_state->cfgcr1 = intel_de_read(i915,
3646 						 TGL_DPLL_CFGCR1(id));
3647 		if (i915->display.vbt.override_afc_startup) {
3648 			hw_state->div0 = intel_de_read(i915, TGL_DPLL0_DIV0(id));
3649 			hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK;
3650 		}
3651 	} else {
3652 		if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
3653 		    id == DPLL_ID_EHL_DPLL4) {
3654 			hw_state->cfgcr0 = intel_de_read(i915,
3655 							 ICL_DPLL_CFGCR0(4));
3656 			hw_state->cfgcr1 = intel_de_read(i915,
3657 							 ICL_DPLL_CFGCR1(4));
3658 		} else {
3659 			hw_state->cfgcr0 = intel_de_read(i915,
3660 							 ICL_DPLL_CFGCR0(id));
3661 			hw_state->cfgcr1 = intel_de_read(i915,
3662 							 ICL_DPLL_CFGCR1(id));
3663 		}
3664 	}
3665 
3666 	ret = true;
3667 out:
3668 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3669 	return ret;
3670 }
3671 
3672 static bool combo_pll_get_hw_state(struct drm_i915_private *i915,
3673 				   struct intel_shared_dpll *pll,
3674 				   struct intel_dpll_hw_state *hw_state)
3675 {
3676 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
3677 
3678 	return icl_pll_get_hw_state(i915, pll, hw_state, enable_reg);
3679 }
3680 
3681 static bool tbt_pll_get_hw_state(struct drm_i915_private *i915,
3682 				 struct intel_shared_dpll *pll,
3683 				 struct intel_dpll_hw_state *hw_state)
3684 {
3685 	return icl_pll_get_hw_state(i915, pll, hw_state, TBT_PLL_ENABLE);
3686 }
3687 
3688 static void icl_dpll_write(struct drm_i915_private *i915,
3689 			   struct intel_shared_dpll *pll)
3690 {
3691 	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3692 	const enum intel_dpll_id id = pll->info->id;
3693 	i915_reg_t cfgcr0_reg, cfgcr1_reg, div0_reg = INVALID_MMIO_REG;
3694 
3695 	if (IS_ALDERLAKE_S(i915)) {
3696 		cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
3697 		cfgcr1_reg = ADLS_DPLL_CFGCR1(id);
3698 	} else if (IS_DG1(i915)) {
3699 		cfgcr0_reg = DG1_DPLL_CFGCR0(id);
3700 		cfgcr1_reg = DG1_DPLL_CFGCR1(id);
3701 	} else if (IS_ROCKETLAKE(i915)) {
3702 		cfgcr0_reg = RKL_DPLL_CFGCR0(id);
3703 		cfgcr1_reg = RKL_DPLL_CFGCR1(id);
3704 	} else if (DISPLAY_VER(i915) >= 12) {
3705 		cfgcr0_reg = TGL_DPLL_CFGCR0(id);
3706 		cfgcr1_reg = TGL_DPLL_CFGCR1(id);
3707 		div0_reg = TGL_DPLL0_DIV0(id);
3708 	} else {
3709 		if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
3710 		    id == DPLL_ID_EHL_DPLL4) {
3711 			cfgcr0_reg = ICL_DPLL_CFGCR0(4);
3712 			cfgcr1_reg = ICL_DPLL_CFGCR1(4);
3713 		} else {
3714 			cfgcr0_reg = ICL_DPLL_CFGCR0(id);
3715 			cfgcr1_reg = ICL_DPLL_CFGCR1(id);
3716 		}
3717 	}
3718 
3719 	intel_de_write(i915, cfgcr0_reg, hw_state->cfgcr0);
3720 	intel_de_write(i915, cfgcr1_reg, hw_state->cfgcr1);
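	/*
	 * Only the branch above that sets div0_reg (TGL_DPLL0_DIV0) can
	 * honour the VBT AFC startup override, hence the sanity check
	 * before the RMW below.
	 */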
3721 	drm_WARN_ON_ONCE(&i915->drm, i915->display.vbt.override_afc_startup &&
3722 			 !i915_mmio_reg_valid(div0_reg));
3723 	if (i915->display.vbt.override_afc_startup &&
3724 	    i915_mmio_reg_valid(div0_reg))
3725 		intel_de_rmw(i915, div0_reg,
3726 			     TGL_DPLL0_DIV0_AFC_STARTUP_MASK, hw_state->div0);
3727 	intel_de_posting_read(i915, cfgcr1_reg);
3728 }
3729 
3730 static void icl_mg_pll_write(struct drm_i915_private *i915,
3731 			     struct intel_shared_dpll *pll)
3732 {
3733 	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3734 	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3735 
3736 	/*
3737 	 * Some of the following registers have reserved fields, so program
3738 	 * these with RMW based on a mask. The mask can be fixed or generated
3739 	 * during the calc/readout phase if the mask depends on some other HW
3740 	 * state like refclk, see icl_calc_mg_pll_state().
3741 	 */
3742 	intel_de_rmw(i915, MG_REFCLKIN_CTL(tc_port),
3743 		     MG_REFCLKIN_CTL_OD_2_MUX_MASK, hw_state->mg_refclkin_ctl);
3744 
3745 	intel_de_rmw(i915, MG_CLKTOP2_CORECLKCTL1(tc_port),
3746 		     MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK,
3747 		     hw_state->mg_clktop2_coreclkctl1);
3748 
3749 	intel_de_rmw(i915, MG_CLKTOP2_HSCLKCTL(tc_port),
3750 		     MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3751 		     MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3752 		     MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3753 		     MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK,
3754 		     hw_state->mg_clktop2_hsclkctl);
3755 
3756 	intel_de_write(i915, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
3757 	intel_de_write(i915, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
3758 	intel_de_write(i915, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
3759 	intel_de_write(i915, MG_PLL_FRAC_LOCK(tc_port),
3760 		       hw_state->mg_pll_frac_lock);
3761 	intel_de_write(i915, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
3762 
3763 	intel_de_rmw(i915, MG_PLL_BIAS(tc_port),
3764 		     hw_state->mg_pll_bias_mask, hw_state->mg_pll_bias);
3765 
3766 	intel_de_rmw(i915, MG_PLL_TDC_COLDST_BIAS(tc_port),
3767 		     hw_state->mg_pll_tdc_coldst_bias_mask,
3768 		     hw_state->mg_pll_tdc_coldst_bias);
3769 
3770 	intel_de_posting_read(i915, MG_PLL_TDC_COLDST_BIAS(tc_port));
3771 }
3772 
3773 static void dkl_pll_write(struct drm_i915_private *i915,
3774 			  struct intel_shared_dpll *pll)
3775 {
3776 	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3777 	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3778 	u32 val;
3779 
3780 	/*
3781 	 * All registers programmed here have the same HIP_INDEX_REG even
3782 	 * though they are on different building blocks
3783 	 */
3784 	/* All the registers are RMW */
3785 	val = intel_dkl_phy_read(i915, DKL_REFCLKIN_CTL(tc_port));
3786 	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3787 	val |= hw_state->mg_refclkin_ctl;
3788 	intel_dkl_phy_write(i915, DKL_REFCLKIN_CTL(tc_port), val);
3789 
3790 	val = intel_dkl_phy_read(i915, DKL_CLKTOP2_CORECLKCTL1(tc_port));
3791 	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3792 	val |= hw_state->mg_clktop2_coreclkctl1;
3793 	intel_dkl_phy_write(i915, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);
3794 
3795 	val = intel_dkl_phy_read(i915, DKL_CLKTOP2_HSCLKCTL(tc_port));
3796 	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3797 		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3798 		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3799 		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
3800 	val |= hw_state->mg_clktop2_hsclkctl;
3801 	intel_dkl_phy_write(i915, DKL_CLKTOP2_HSCLKCTL(tc_port), val);
3802 
3803 	val = DKL_PLL_DIV0_MASK;
3804 	if (i915->display.vbt.override_afc_startup)
3805 		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
3806 	intel_dkl_phy_rmw(i915, DKL_PLL_DIV0(tc_port), val,
3807 			  hw_state->mg_pll_div0);
3808 
3809 	val = intel_dkl_phy_read(i915, DKL_PLL_DIV1(tc_port));
3810 	val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
3811 		 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
3812 	val |= hw_state->mg_pll_div1;
3813 	intel_dkl_phy_write(i915, DKL_PLL_DIV1(tc_port), val);
3814 
3815 	val = intel_dkl_phy_read(i915, DKL_PLL_SSC(tc_port));
3816 	val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
3817 		 DKL_PLL_SSC_STEP_LEN_MASK |
3818 		 DKL_PLL_SSC_STEP_NUM_MASK |
3819 		 DKL_PLL_SSC_EN);
3820 	val |= hw_state->mg_pll_ssc;
3821 	intel_dkl_phy_write(i915, DKL_PLL_SSC(tc_port), val);
3822 
3823 	val = intel_dkl_phy_read(i915, DKL_PLL_BIAS(tc_port));
3824 	val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
3825 		 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
3826 	val |= hw_state->mg_pll_bias;
3827 	intel_dkl_phy_write(i915, DKL_PLL_BIAS(tc_port), val);
3828 
3829 	val = intel_dkl_phy_read(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3830 	val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
3831 		 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
3832 	val |= hw_state->mg_pll_tdc_coldst_bias;
3833 	intel_dkl_phy_write(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);
3834 
3835 	intel_dkl_phy_posting_read(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3836 }
3837 
3838 static void icl_pll_power_enable(struct drm_i915_private *i915,
3839 				 struct intel_shared_dpll *pll,
3840 				 i915_reg_t enable_reg)
3841 {
3842 	intel_de_rmw(i915, enable_reg, 0, PLL_POWER_ENABLE);
3843 
3844 	/*
3845 	 * The spec says we need to "wait" but it also says it should be
3846 	 * immediate.
3847 	 */
3848 	if (intel_de_wait_for_set(i915, enable_reg, PLL_POWER_STATE, 1))
3849 		drm_err(&i915->drm, "PLL %d Power not enabled\n",
3850 			pll->info->id);
3851 }
3852 
3853 static void icl_pll_enable(struct drm_i915_private *i915,
3854 			   struct intel_shared_dpll *pll,
3855 			   i915_reg_t enable_reg)
3856 {
3857 	intel_de_rmw(i915, enable_reg, 0, PLL_ENABLE);
3858 
3859 	/* Timeout is actually 600us. */
3860 	if (intel_de_wait_for_set(i915, enable_reg, PLL_LOCK, 1))
3861 		drm_err(&i915->drm, "PLL %d not locked\n", pll->info->id);
3862 }
3863 
3864 static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct intel_shared_dpll *pll)
3865 {
3866 	u32 val;
3867 
3868 	if (!(IS_ALDERLAKE_P(i915) && IS_DISPLAY_STEP(i915, STEP_A0, STEP_B0)) ||
3869 	    pll->info->id != DPLL_ID_ICL_DPLL0)
3870 		return;
3871 	/*
3872 	 * Wa_16011069516:adl-p[a0]
3873 	 *
3874 	 * All CMTG regs are unreliable until CMTG clock gating is disabled,
3875 	 * so we can only assume the default TRANS_CMTG_CHICKEN reg value and
3876 	 * sanity check this assumption with a double read, which presumably
3877 	 * returns the correct value even with clock gating on.
3878 	 *
3879 	 * Instead of the usual place for workarounds we apply this one here,
3880 	 * since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled.
3881 	 */
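	/*
	 * The first read only serves as the "double read" mentioned above
	 * and its result is discarded; intel_de_rmw() returns the pre-RMW
	 * register value, which is what gets sanity checked below.
	 */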
3882 	val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
3883 	val = intel_de_rmw(i915, TRANS_CMTG_CHICKEN, ~0, DISABLE_DPT_CLK_GATING);
3884 	if (drm_WARN_ON(&i915->drm, val & ~DISABLE_DPT_CLK_GATING))
3885 		drm_dbg_kms(&i915->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val);
3886 }
3887 
3888 static void combo_pll_enable(struct drm_i915_private *i915,
3889 			     struct intel_shared_dpll *pll)
3890 {
3891 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
3892 
3893 	icl_pll_power_enable(i915, pll, enable_reg);
3894 
3895 	icl_dpll_write(i915, pll);
3896 
3897 	/*
3898 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3899 	 * paths should already be setting the appropriate voltage, hence we do
3900 	 * nothing here.
3901 	 */
3902 
3903 	icl_pll_enable(i915, pll, enable_reg);
3904 
3905 	adlp_cmtg_clock_gating_wa(i915, pll);
3906 
3907 	/* DVFS post sequence would be here. See the comment above. */
3908 }
3909 
3910 static void tbt_pll_enable(struct drm_i915_private *i915,
3911 			   struct intel_shared_dpll *pll)
3912 {
3913 	icl_pll_power_enable(i915, pll, TBT_PLL_ENABLE);
3914 
3915 	icl_dpll_write(i915, pll);
3916 
3917 	/*
3918 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3919 	 * paths should already be setting the appropriate voltage, hence we do
3920 	 * nothing here.
3921 	 */
3922 
3923 	icl_pll_enable(i915, pll, TBT_PLL_ENABLE);
3924 
3925 	/* DVFS post sequence would be here. See the comment above. */
3926 }
3927 
3928 static void mg_pll_enable(struct drm_i915_private *i915,
3929 			  struct intel_shared_dpll *pll)
3930 {
3931 	i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll);
3932 
3933 	icl_pll_power_enable(i915, pll, enable_reg);
3934 
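	/* Display 12+ TC ports use the Dekel PHY, ICL uses the MG PHY. */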
3935 	if (DISPLAY_VER(i915) >= 12)
3936 		dkl_pll_write(i915, pll);
3937 	else
3938 		icl_mg_pll_write(i915, pll);
3939 
3940 	/*
3941 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3942 	 * paths should already be setting the appropriate voltage, hence we do
3943 	 * nothing here.
3944 	 */
3945 
3946 	icl_pll_enable(i915, pll, enable_reg);
3947 
3948 	/* DVFS post sequence would be here. See the comment above. */
3949 }
3950 
3951 static void icl_pll_disable(struct drm_i915_private *i915,
3952 			    struct intel_shared_dpll *pll,
3953 			    i915_reg_t enable_reg)
3954 {
3955 	/* The first steps are done by intel_ddi_post_disable(). */
3956 
3957 	/*
3958 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3959 	 * paths should already be setting the appropriate voltage, hence we do
3960 	 * nothing here.
3961 	 */
3962 
3963 	intel_de_rmw(i915, enable_reg, PLL_ENABLE, 0);
3964 
3965 	/* Timeout is actually 1us. */
3966 	if (intel_de_wait_for_clear(i915, enable_reg, PLL_LOCK, 1))
3967 		drm_err(&i915->drm, "PLL %d still locked\n", pll->info->id);
3968 
3969 	/* DVFS post sequence would be here. See the comment above. */
3970 
3971 	intel_de_rmw(i915, enable_reg, PLL_POWER_ENABLE, 0);
3972 
3973 	/*
3974 	 * The spec says we need to "wait" but it also says it should be
3975 	 * immediate.
3976 	 */
3977 	if (intel_de_wait_for_clear(i915, enable_reg, PLL_POWER_STATE, 1))
3978 		drm_err(&i915->drm, "PLL %d Power not disabled\n",
3979 			pll->info->id);
3980 }
3981 
3982 static void combo_pll_disable(struct drm_i915_private *i915,
3983 			      struct intel_shared_dpll *pll)
3984 {
3985 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
3986 
3987 	icl_pll_disable(i915, pll, enable_reg);
3988 }
3989 
3990 static void tbt_pll_disable(struct drm_i915_private *i915,
3991 			    struct intel_shared_dpll *pll)
3992 {
3993 	icl_pll_disable(i915, pll, TBT_PLL_ENABLE);
3994 }
3995 
3996 static void mg_pll_disable(struct drm_i915_private *i915,
3997 			   struct intel_shared_dpll *pll)
3998 {
3999 	i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll);
4000 
4001 	icl_pll_disable(i915, pll, enable_reg);
4002 }
4003 
4004 static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
4005 {
4006 	/* No SSC ref */
4007 	i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
4008 }
4009 
4010 static void icl_dump_hw_state(struct drm_printer *p,
4011 			      const struct intel_dpll_hw_state *hw_state)
4012 {
4013 	drm_printf(p, "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, "
4014 		   "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
4015 		   "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
4016 		   "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
4017 		   "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
4018 		   "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
4019 		   hw_state->cfgcr0, hw_state->cfgcr1, hw_state->div0,
4020 		   hw_state->mg_refclkin_ctl,
4021 		   hw_state->mg_clktop2_coreclkctl1,
4022 		   hw_state->mg_clktop2_hsclkctl,
4023 		   hw_state->mg_pll_div0,
4024 		   hw_state->mg_pll_div1,
4025 		   hw_state->mg_pll_lf,
4026 		   hw_state->mg_pll_frac_lock,
4027 		   hw_state->mg_pll_ssc,
4028 		   hw_state->mg_pll_bias,
4029 		   hw_state->mg_pll_tdc_coldst_bias);
4030 }
4031 
4032 static bool icl_compare_hw_state(const struct intel_dpll_hw_state *a,
4033 				 const struct intel_dpll_hw_state *b)
4034 {
4035 	/* FIXME split combo vs. mg more thoroughly */
4036 	return a->cfgcr0 == b->cfgcr0 &&
4037 		a->cfgcr1 == b->cfgcr1 &&
4038 		a->div0 == b->div0 &&
4039 		a->mg_refclkin_ctl == b->mg_refclkin_ctl &&
4040 		a->mg_clktop2_coreclkctl1 == b->mg_clktop2_coreclkctl1 &&
4041 		a->mg_clktop2_hsclkctl == b->mg_clktop2_hsclkctl &&
4042 		a->mg_pll_div0 == b->mg_pll_div0 &&
4043 		a->mg_pll_div1 == b->mg_pll_div1 &&
4044 		a->mg_pll_lf == b->mg_pll_lf &&
4045 		a->mg_pll_frac_lock == b->mg_pll_frac_lock &&
4046 		a->mg_pll_ssc == b->mg_pll_ssc &&
4047 		a->mg_pll_bias == b->mg_pll_bias &&
4048 		a->mg_pll_tdc_coldst_bias == b->mg_pll_tdc_coldst_bias;
4049 }
4050 
4051 static const struct intel_shared_dpll_funcs combo_pll_funcs = {
4052 	.enable = combo_pll_enable,
4053 	.disable = combo_pll_disable,
4054 	.get_hw_state = combo_pll_get_hw_state,
4055 	.get_freq = icl_ddi_combo_pll_get_freq,
4056 };
4057 
4058 static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
4059 	.enable = tbt_pll_enable,
4060 	.disable = tbt_pll_disable,
4061 	.get_hw_state = tbt_pll_get_hw_state,
4062 	.get_freq = icl_ddi_tbt_pll_get_freq,
4063 };
4064 
4065 static const struct intel_shared_dpll_funcs mg_pll_funcs = {
4066 	.enable = mg_pll_enable,
4067 	.disable = mg_pll_disable,
4068 	.get_hw_state = mg_pll_get_hw_state,
4069 	.get_freq = icl_ddi_mg_pll_get_freq,
4070 };
4071 
4072 static const struct dpll_info icl_plls[] = {
4073 	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4074 	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4075 	{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
4076 	  .is_alt_port_dpll = true, },
4077 	{ .name = "MG PLL 1", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
4078 	{ .name = "MG PLL 2", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
4079 	{ .name = "MG PLL 3", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
4080 	{ .name = "MG PLL 4", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
4081 	{}
4082 };
4083 
4084 static const struct intel_dpll_mgr icl_pll_mgr = {
4085 	.dpll_info = icl_plls,
4086 	.compute_dplls = icl_compute_dplls,
4087 	.get_dplls = icl_get_dplls,
4088 	.put_dplls = icl_put_dplls,
4089 	.update_active_dpll = icl_update_active_dpll,
4090 	.update_ref_clks = icl_update_dpll_ref_clks,
4091 	.dump_hw_state = icl_dump_hw_state,
4092 	.compare_hw_state = icl_compare_hw_state,
4093 };
4094 
4095 static const struct dpll_info ehl_plls[] = {
4096 	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4097 	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4098 	{ .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4,
4099 	  .power_domain = POWER_DOMAIN_DC_OFF, },
4100 	{}
4101 };
4102 
4103 static const struct intel_dpll_mgr ehl_pll_mgr = {
4104 	.dpll_info = ehl_plls,
4105 	.compute_dplls = icl_compute_dplls,
4106 	.get_dplls = icl_get_dplls,
4107 	.put_dplls = icl_put_dplls,
4108 	.update_ref_clks = icl_update_dpll_ref_clks,
4109 	.dump_hw_state = icl_dump_hw_state,
4110 	.compare_hw_state = icl_compare_hw_state,
4111 };
4112 
4113 static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
4114 	.enable = mg_pll_enable,
4115 	.disable = mg_pll_disable,
4116 	.get_hw_state = dkl_pll_get_hw_state,
4117 	.get_freq = icl_ddi_mg_pll_get_freq,
4118 };
4119 
4120 static const struct dpll_info tgl_plls[] = {
4121 	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4122 	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4123 	{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
4124 	  .is_alt_port_dpll = true, },
4125 	{ .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
4126 	{ .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
4127 	{ .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
4128 	{ .name = "TC PLL 4", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
4129 	{ .name = "TC PLL 5", .funcs = &dkl_pll_funcs, .id = DPLL_ID_TGL_MGPLL5, },
4130 	{ .name = "TC PLL 6", .funcs = &dkl_pll_funcs, .id = DPLL_ID_TGL_MGPLL6, },
4131 	{}
4132 };
4133 
4134 static const struct intel_dpll_mgr tgl_pll_mgr = {
4135 	.dpll_info = tgl_plls,
4136 	.compute_dplls = icl_compute_dplls,
4137 	.get_dplls = icl_get_dplls,
4138 	.put_dplls = icl_put_dplls,
4139 	.update_active_dpll = icl_update_active_dpll,
4140 	.update_ref_clks = icl_update_dpll_ref_clks,
4141 	.dump_hw_state = icl_dump_hw_state,
4142 	.compare_hw_state = icl_compare_hw_state,
4143 };
4144 
4145 static const struct dpll_info rkl_plls[] = {
4146 	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4147 	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4148 	{ .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4, },
4149 	{}
4150 };
4151 
4152 static const struct intel_dpll_mgr rkl_pll_mgr = {
4153 	.dpll_info = rkl_plls,
4154 	.compute_dplls = icl_compute_dplls,
4155 	.get_dplls = icl_get_dplls,
4156 	.put_dplls = icl_put_dplls,
4157 	.update_ref_clks = icl_update_dpll_ref_clks,
4158 	.dump_hw_state = icl_dump_hw_state,
4159 	.compare_hw_state = icl_compare_hw_state,
4160 };
4161 
4162 static const struct dpll_info dg1_plls[] = {
4163 	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL0, },
4164 	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL1, },
4165 	{ .name = "DPLL 2", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL2, },
4166 	{ .name = "DPLL 3", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL3, },
4167 	{}
4168 };
4169 
4170 static const struct intel_dpll_mgr dg1_pll_mgr = {
4171 	.dpll_info = dg1_plls,
4172 	.compute_dplls = icl_compute_dplls,
4173 	.get_dplls = icl_get_dplls,
4174 	.put_dplls = icl_put_dplls,
4175 	.update_ref_clks = icl_update_dpll_ref_clks,
4176 	.dump_hw_state = icl_dump_hw_state,
4177 	.compare_hw_state = icl_compare_hw_state,
4178 };
4179 
4180 static const struct dpll_info adls_plls[] = {
4181 	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4182 	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4183 	{ .name = "DPLL 2", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL2, },
4184 	{ .name = "DPLL 3", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL3, },
4185 	{}
4186 };
4187 
4188 static const struct intel_dpll_mgr adls_pll_mgr = {
4189 	.dpll_info = adls_plls,
4190 	.compute_dplls = icl_compute_dplls,
4191 	.get_dplls = icl_get_dplls,
4192 	.put_dplls = icl_put_dplls,
4193 	.update_ref_clks = icl_update_dpll_ref_clks,
4194 	.dump_hw_state = icl_dump_hw_state,
4195 	.compare_hw_state = icl_compare_hw_state,
4196 };
4197 
4198 static const struct dpll_info adlp_plls[] = {
4199 	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4200 	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4201 	{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
4202 	  .is_alt_port_dpll = true, },
4203 	{ .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
4204 	{ .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
4205 	{ .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
4206 	{ .name = "TC PLL 4", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
4207 	{}
4208 };
4209 
4210 static const struct intel_dpll_mgr adlp_pll_mgr = {
4211 	.dpll_info = adlp_plls,
4212 	.compute_dplls = icl_compute_dplls,
4213 	.get_dplls = icl_get_dplls,
4214 	.put_dplls = icl_put_dplls,
4215 	.update_active_dpll = icl_update_active_dpll,
4216 	.update_ref_clks = icl_update_dpll_ref_clks,
4217 	.dump_hw_state = icl_dump_hw_state,
4218 	.compare_hw_state = icl_compare_hw_state,
4219 };
4220 
4221 /**
4222  * intel_shared_dpll_init - Initialize shared DPLLs
4223  * @i915: i915 device
4224  *
4225  * Initialize shared DPLLs for @i915.
4226  */
4227 void intel_shared_dpll_init(struct drm_i915_private *i915)
4228 {
4229 	const struct intel_dpll_mgr *dpll_mgr = NULL;
4230 	const struct dpll_info *dpll_info;
4231 	int i;
4232 
4233 	mutex_init(&i915->display.dpll.lock);
4234 
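	/*
	 * Platform checks are ordered from most to least specific, e.g.
	 * ADL-P/S, DG1 and RKL must be matched before the generic
	 * DISPLAY_VER() >= 12 case below.
	 */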
4235 	if (DISPLAY_VER(i915) >= 14 || IS_DG2(i915))
4236 		/* No shared DPLLs on DG2 or display ver 14+; port PLLs are part of the PHY */
4237 		dpll_mgr = NULL;
4238 	else if (IS_ALDERLAKE_P(i915))
4239 		dpll_mgr = &adlp_pll_mgr;
4240 	else if (IS_ALDERLAKE_S(i915))
4241 		dpll_mgr = &adls_pll_mgr;
4242 	else if (IS_DG1(i915))
4243 		dpll_mgr = &dg1_pll_mgr;
4244 	else if (IS_ROCKETLAKE(i915))
4245 		dpll_mgr = &rkl_pll_mgr;
4246 	else if (DISPLAY_VER(i915) >= 12)
4247 		dpll_mgr = &tgl_pll_mgr;
4248 	else if (IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915))
4249 		dpll_mgr = &ehl_pll_mgr;
4250 	else if (DISPLAY_VER(i915) >= 11)
4251 		dpll_mgr = &icl_pll_mgr;
4252 	else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
4253 		dpll_mgr = &bxt_pll_mgr;
4254 	else if (DISPLAY_VER(i915) == 9)
4255 		dpll_mgr = &skl_pll_mgr;
4256 	else if (HAS_DDI(i915))
4257 		dpll_mgr = &hsw_pll_mgr;
4258 	else if (HAS_PCH_IBX(i915) || HAS_PCH_CPT(i915))
4259 		dpll_mgr = &pch_pll_mgr;
4260 
4261 	if (!dpll_mgr)
4262 		return;
4263 
4264 	dpll_info = dpll_mgr->dpll_info;
4265 
4266 	for (i = 0; dpll_info[i].name; i++) {
4267 		if (drm_WARN_ON(&i915->drm,
4268 				i >= ARRAY_SIZE(i915->display.dpll.shared_dplls)))
4269 			break;
4270 
4271 		/* must fit into unsigned long bitmask on 32bit */
4272 		if (drm_WARN_ON(&i915->drm, dpll_info[i].id >= 32))
4273 			break;
4274 
4275 		i915->display.dpll.shared_dplls[i].info = &dpll_info[i];
4276 		i915->display.dpll.shared_dplls[i].index = i;
4277 	}
4278 
4279 	i915->display.dpll.mgr = dpll_mgr;
4280 	i915->display.dpll.num_shared_dpll = i;
4281 }
4282 
4283 /**
4284  * intel_compute_shared_dplls - compute DPLL state for a CRTC and encoder combination
4285  * @state: atomic state
4286  * @crtc: CRTC to compute DPLLs for
4287  * @encoder: encoder
4288  *
4289  * This function computes the DPLL state for the given CRTC and encoder.
4290  *
4291  * The new configuration in the atomic commit @state is made effective by
4292  * calling intel_shared_dpll_swap_state().
4293  *
4294  * Returns:
4295  * 0 on success, negative error code on falure.
4296  */
4297 int intel_compute_shared_dplls(struct intel_atomic_state *state,
4298 			       struct intel_crtc *crtc,
4299 			       struct intel_encoder *encoder)
4300 {
4301 	struct drm_i915_private *i915 = to_i915(state->base.dev);
4302 	const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4303 
4304 	if (drm_WARN_ON(&i915->drm, !dpll_mgr))
4305 		return -EINVAL;
4306 
4307 	return dpll_mgr->compute_dplls(state, crtc, encoder);
4308 }
4309 
4310 /**
4311  * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
4312  * @state: atomic state
4313  * @crtc: CRTC to reserve DPLLs for
4314  * @encoder: encoder
4315  *
4316  * This function reserves all required DPLLs for the given CRTC and encoder
4317  * combination in the current atomic commit @state and the new @crtc atomic
4318  * state.
4319  *
4320  * The new configuration in the atomic commit @state is made effective by
4321  * calling intel_shared_dpll_swap_state().
4322  *
4323  * The reserved DPLLs should be released by calling
4324  * intel_release_shared_dplls().
4325  *
4326  * Returns:
4327  * 0 if all required DPLLs were successfully reserved,
4328  * negative error code otherwise.
4329  */
4330 int intel_reserve_shared_dplls(struct intel_atomic_state *state,
4331 			       struct intel_crtc *crtc,
4332 			       struct intel_encoder *encoder)
4333 {
4334 	struct drm_i915_private *i915 = to_i915(state->base.dev);
4335 	const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4336 
4337 	if (drm_WARN_ON(&i915->drm, !dpll_mgr))
4338 		return -EINVAL;
4339 
4340 	return dpll_mgr->get_dplls(state, crtc, encoder);
4341 }
4342 
4343 /**
4344  * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
4345  * @state: atomic state
4346  * @crtc: crtc from which the DPLLs are to be released
4347  *
4348  * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
4349  * from the current atomic commit @state and the old @crtc atomic state.
4350  *
4351  * The new configuration in the atomic commit @state is made effective by
4352  * calling intel_shared_dpll_swap_state().
4353  */
4354 void intel_release_shared_dplls(struct intel_atomic_state *state,
4355 				struct intel_crtc *crtc)
4356 {
4357 	struct drm_i915_private *i915 = to_i915(state->base.dev);
4358 	const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4359 
4360 	/*
4361 	 * FIXME: this function is called for every platform having a
4362 	 * compute_clock hook, even though the platform doesn't yet support
4363 	 * the shared DPLL framework and intel_reserve_shared_dplls() is not
4364 	 * called on those.
4365 	 */
4366 	if (!dpll_mgr)
4367 		return;
4368 
4369 	dpll_mgr->put_dplls(state, crtc);
4370 }
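
/*
 * Example (sketch only, not actual driver code): a platform clock compute
 * hook could use the reservation API above roughly as follows. The
 * example_crtc_compute_clock() name is made up and the surrounding atomic
 * plumbing and error handling are omitted.
 *
 *	static int example_crtc_compute_clock(struct intel_atomic_state *state,
 *					      struct intel_crtc *crtc,
 *					      struct intel_encoder *encoder)
 *	{
 *		int ret;
 *
 *		ret = intel_compute_shared_dplls(state, crtc, encoder);
 *		if (ret)
 *			return ret;
 *
 *		return intel_reserve_shared_dplls(state, crtc, encoder);
 *	}
 *
 * The reservation is dropped again with intel_release_shared_dplls() and
 * both changes only take effect once intel_shared_dpll_swap_state() runs
 * during the commit phase.
 */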
4371 
4372 /**
4373  * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
4374  * @state: atomic state
4375  * @crtc: the CRTC for which to update the active DPLL
4376  * @encoder: encoder determining the type of port DPLL
4377  *
4378  * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
4379  * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
4380  * DPLL selected will be based on the current mode of the encoder's port.
4381  */
4382 void intel_update_active_dpll(struct intel_atomic_state *state,
4383 			      struct intel_crtc *crtc,
4384 			      struct intel_encoder *encoder)
4385 {
4386 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
4387 	const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4388 
4389 	if (drm_WARN_ON(&i915->drm, !dpll_mgr))
4390 		return;
4391 
4392 	dpll_mgr->update_active_dpll(state, crtc, encoder);
4393 }
4394 
4395 /**
4396  * intel_dpll_get_freq - calculate the DPLL's output frequency
4397  * @i915: i915 device
4398  * @pll: DPLL for which to calculate the output frequency
4399  * @pll_state: DPLL state from which to calculate the output frequency
4400  *
4401  * Return the output frequency corresponding to @pll's passed in @pll_state.
4402  */
4403 int intel_dpll_get_freq(struct drm_i915_private *i915,
4404 			const struct intel_shared_dpll *pll,
4405 			const struct intel_dpll_hw_state *pll_state)
4406 {
4407 	if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
4408 		return 0;
4409 
4410 	return pll->info->funcs->get_freq(i915, pll, pll_state);
4411 }
4412 
4413 /**
4414  * intel_dpll_get_hw_state - readout the DPLL's hardware state
4415  * @i915: i915 device
4416  * @pll: DPLL for which to read out the hardware state
4417  * @hw_state: DPLL's hardware state
4418  *
4419  * Read out @pll's hardware state into @hw_state.
4420  */
4421 bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
4422 			     struct intel_shared_dpll *pll,
4423 			     struct intel_dpll_hw_state *hw_state)
4424 {
4425 	return pll->info->funcs->get_hw_state(i915, pll, hw_state);
4426 }
4427 
4428 static void readout_dpll_hw_state(struct drm_i915_private *i915,
4429 				  struct intel_shared_dpll *pll)
4430 {
4431 	struct intel_crtc *crtc;
4432 
4433 	pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state);
4434 
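	/*
	 * PLLs that declare an explicit power domain (e.g. DPLL4 on EHL/JSL
	 * with POWER_DOMAIN_DC_OFF) and are found enabled need a wakeref
	 * held here so the domain stays on while the PLL is in use.
	 */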
4435 	if (pll->on && pll->info->power_domain)
4436 		pll->wakeref = intel_display_power_get(i915, pll->info->power_domain);
4437 
4438 	pll->state.pipe_mask = 0;
4439 	for_each_intel_crtc(&i915->drm, crtc) {
4440 		struct intel_crtc_state *crtc_state =
4441 			to_intel_crtc_state(crtc->base.state);
4442 
4443 		if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
4444 			intel_reference_shared_dpll_crtc(crtc, pll, &pll->state);
4445 	}
4446 	pll->active_mask = pll->state.pipe_mask;
4447 
4448 	drm_dbg_kms(&i915->drm,
4449 		    "%s hw state readout: pipe_mask 0x%x, on %i\n",
4450 		    pll->info->name, pll->state.pipe_mask, pll->on);
4451 }
4452 
4453 void intel_dpll_update_ref_clks(struct drm_i915_private *i915)
4454 {
4455 	if (i915->display.dpll.mgr && i915->display.dpll.mgr->update_ref_clks)
4456 		i915->display.dpll.mgr->update_ref_clks(i915);
4457 }
4458 
4459 void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
4460 {
4461 	struct intel_shared_dpll *pll;
4462 	int i;
4463 
4464 	for_each_shared_dpll(i915, pll, i)
4465 		readout_dpll_hw_state(i915, pll);
4466 }
4467 
4468 static void sanitize_dpll_state(struct drm_i915_private *i915,
4469 				struct intel_shared_dpll *pll)
4470 {
4471 	if (!pll->on)
4472 		return;
4473 
4474 	adlp_cmtg_clock_gating_wa(i915, pll);
4475 
4476 	if (pll->active_mask)
4477 		return;
4478 
4479 	drm_dbg_kms(&i915->drm,
4480 		    "%s enabled but not in use, disabling\n",
4481 		    pll->info->name);
4482 
4483 	_intel_disable_shared_dpll(i915, pll);
4484 }
4485 
4486 void intel_dpll_sanitize_state(struct drm_i915_private *i915)
4487 {
4488 	struct intel_shared_dpll *pll;
4489 	int i;
4490 
4491 	for_each_shared_dpll(i915, pll, i)
4492 		sanitize_dpll_state(i915, pll);
4493 }
4494 
4495 /**
4496  * intel_dpll_dump_hw_state - dump hw_state
4497  * @i915: i915 drm device
4498  * @p: where to print the state to
4499  * @hw_state: hw state to be dumped
4500  *
4501  * Dump out the relevant values in @hw_state.
4502  */
4503 void intel_dpll_dump_hw_state(struct drm_i915_private *i915,
4504 			      struct drm_printer *p,
4505 			      const struct intel_dpll_hw_state *hw_state)
4506 {
4507 	if (i915->display.dpll.mgr) {
4508 		i915->display.dpll.mgr->dump_hw_state(p, hw_state);
4509 	} else {
4510 		/* fallback for platforms that don't use the shared dpll
4511 		 * infrastructure
4512 		 */
4513 		ibx_dump_hw_state(p, hw_state);
4514 	}
4515 }
4516 
4517 /**
4518  * intel_dpll_compare_hw_state - compare the two states
4519  * @i915: i915 drm device
4520  * @a: first DPLL hw state
4521  * @b: second DPLL hw state
4522  *
4523  * Compare DPLL hw states @a and @b.
4524  *
4525  * Returns: true if the states are equal, false if they differ
4526  */
4527 bool intel_dpll_compare_hw_state(struct drm_i915_private *i915,
4528 				 const struct intel_dpll_hw_state *a,
4529 				 const struct intel_dpll_hw_state *b)
4530 {
4531 	if (i915->display.dpll.mgr) {
4532 		return i915->display.dpll.mgr->compare_hw_state(a, b);
4533 	} else {
4534 		/* fallback for platforms that don't use the shared dpll
4535 		 * infrastructure
4536 		 */
4537 		return ibx_compare_hw_state(a, b);
4538 	}
4539 }
4540 
4541 static void
4542 verify_single_dpll_state(struct drm_i915_private *i915,
4543 			 struct intel_shared_dpll *pll,
4544 			 struct intel_crtc *crtc,
4545 			 const struct intel_crtc_state *new_crtc_state)
4546 {
4547 	struct intel_dpll_hw_state dpll_hw_state = {};
4548 	u8 pipe_mask;
4549 	bool active;
4550 
4551 	active = intel_dpll_get_hw_state(i915, pll, &dpll_hw_state);
4552 
4553 	if (!pll->info->always_on) {
4554 		I915_STATE_WARN(i915, !pll->on && pll->active_mask,
4555 				"%s: pll in active use but not on in sw tracking\n",
4556 				pll->info->name);
4557 		I915_STATE_WARN(i915, pll->on && !pll->active_mask,
4558 				"%s: pll is on but not used by any active pipe\n",
4559 				pll->info->name);
4560 		I915_STATE_WARN(i915, pll->on != active,
4561 				"%s: pll on state mismatch (expected %i, found %i)\n",
4562 				pll->info->name, pll->on, active);
4563 	}
4564 
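	/*
	 * Without a crtc (see intel_shared_dpll_verify_disabled()) only the
	 * global bookkeeping of the PLL can be checked.
	 */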
4565 	if (!crtc) {
4566 		I915_STATE_WARN(i915,
4567 				pll->active_mask & ~pll->state.pipe_mask,
4568 				"%s: more active pll users than references: 0x%x vs 0x%x\n",
4569 				pll->info->name, pll->active_mask, pll->state.pipe_mask);
4570 
4571 		return;
4572 	}
4573 
4574 	pipe_mask = BIT(crtc->pipe);
4575 
4576 	if (new_crtc_state->hw.active)
4577 		I915_STATE_WARN(i915, !(pll->active_mask & pipe_mask),
4578 				"%s: pll active mismatch (expected pipe %c in active mask 0x%x)\n",
4579 				pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
4580 	else
4581 		I915_STATE_WARN(i915, pll->active_mask & pipe_mask,
4582 				"%s: pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
4583 				pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
4584 
4585 	I915_STATE_WARN(i915, !(pll->state.pipe_mask & pipe_mask),
4586 			"%s: pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
4587 			pll->info->name, pipe_mask, pll->state.pipe_mask);
4588 
4589 	I915_STATE_WARN(i915,
4590 			pll->on && memcmp(&pll->state.hw_state, &dpll_hw_state,
4591 					  sizeof(dpll_hw_state)),
4592 			"%s: pll hw state mismatch\n",
4593 			pll->info->name);
4594 }
4595 
4596 static bool has_alt_port_dpll(const struct intel_shared_dpll *old_pll,
4597 			      const struct intel_shared_dpll *new_pll)
4598 {
4599 	return old_pll && new_pll && old_pll != new_pll &&
4600 		(old_pll->info->is_alt_port_dpll || new_pll->info->is_alt_port_dpll);
4601 }
4602 
4603 void intel_shared_dpll_state_verify(struct intel_atomic_state *state,
4604 				    struct intel_crtc *crtc)
4605 {
4606 	struct drm_i915_private *i915 = to_i915(state->base.dev);
4607 	const struct intel_crtc_state *old_crtc_state =
4608 		intel_atomic_get_old_crtc_state(state, crtc);
4609 	const struct intel_crtc_state *new_crtc_state =
4610 		intel_atomic_get_new_crtc_state(state, crtc);
4611 
4612 	if (new_crtc_state->shared_dpll)
4613 		verify_single_dpll_state(i915, new_crtc_state->shared_dpll,
4614 					 crtc, new_crtc_state);
4615 
4616 	if (old_crtc_state->shared_dpll &&
4617 	    old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
4618 		u8 pipe_mask = BIT(crtc->pipe);
4619 		struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
4620 
4621 		I915_STATE_WARN(i915, pll->active_mask & pipe_mask,
4622 				"%s: pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
4623 				pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
4624 
4625 		/* TC ports have both MG/TC and TBT PLL referenced simultaneously */
4626 		I915_STATE_WARN(i915, !has_alt_port_dpll(old_crtc_state->shared_dpll,
4627 							 new_crtc_state->shared_dpll) &&
4628 				pll->state.pipe_mask & pipe_mask,
4629 				"%s: pll enabled crtcs mismatch (found pipe %c in enabled mask (0x%x))\n",
4630 				pll->info->name, pipe_name(crtc->pipe), pll->state.pipe_mask);
4631 	}
4632 }
4633 
4634 void intel_shared_dpll_verify_disabled(struct intel_atomic_state *state)
4635 {
4636 	struct drm_i915_private *i915 = to_i915(state->base.dev);
4637 	struct intel_shared_dpll *pll;
4638 	int i;
4639 
4640 	for_each_shared_dpll(i915, pll, i)
4641 		verify_single_dpll_state(i915, pll, NULL, NULL);
4642 }
4643