xref: /linux/drivers/gpu/drm/i915/display/intel_dpll_mgr.c (revision c17ee635fd3a482b2ad2bf5e269755c2eae5f25e)
1 /*
2  * Copyright © 2006-2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <linux/math.h>
25 #include <linux/string_helpers.h>
26 
27 #include <drm/drm_print.h>
28 
29 #include "bxt_dpio_phy_regs.h"
30 #include "intel_cx0_phy.h"
31 #include "intel_de.h"
32 #include "intel_display_regs.h"
33 #include "intel_display_types.h"
34 #include "intel_display_utils.h"
35 #include "intel_dkl_phy.h"
36 #include "intel_dkl_phy_regs.h"
37 #include "intel_dpio_phy.h"
38 #include "intel_dpll.h"
39 #include "intel_dpll_mgr.h"
40 #include "intel_hti.h"
41 #include "intel_mg_phy_regs.h"
42 #include "intel_pch_refclk.h"
43 #include "intel_step.h"
44 #include "intel_tc.h"
45 
46 /**
47  * DOC: Display PLLs
48  *
49  * Display PLLs used for driving outputs vary by platform. While some have
50  * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
51  * from a pool. In the latter scenario, it is possible that multiple pipes
52  * share a PLL if their configurations match.
53  *
54  * This file provides an abstraction over display PLLs. The function
55  * intel_dpll_init() initializes the PLLs for the given platform.  The
56  * users of a PLL are tracked and that tracking is integrated with the atomic
57  * modset interface. During an atomic operation, required PLLs can be reserved
58  * for a given CRTC and encoder configuration by calling
59  * intel_dpll_reserve() and previously reserved PLLs can be released
60  * with intel_dpll_release().
61  * Changes to the users are first staged in the atomic state, and then made
62  * effective by calling intel_dpll_swap_state() during the atomic
63  * commit phase.
64  */
65 
66 /* platform specific hooks for managing DPLLs */
67 struct intel_dpll_funcs {
68 	/*
69 	 * Hook for enabling the pll, called from intel_enable_dpll() if
70 	 * the pll is not already enabled.
71 	 */
72 	void (*enable)(struct intel_display *display,
73 		       struct intel_dpll *pll,
74 		       const struct intel_dpll_hw_state *dpll_hw_state);
75 
76 	/*
77 	 * Hook for disabling the pll, called from intel_disable_dpll()
78 	 * only when it is safe to disable the pll, i.e., there are no more
79 	 * tracked users for it.
80 	 */
81 	void (*disable)(struct intel_display *display,
82 			struct intel_dpll *pll);
83 
84 	/*
85 	 * Hook for reading the values currently programmed to the DPLL
86 	 * registers. This is used for initial hw state readout and state
87 	 * verification after a mode set.
88 	 */
89 	bool (*get_hw_state)(struct intel_display *display,
90 			     struct intel_dpll *pll,
91 			     struct intel_dpll_hw_state *dpll_hw_state);
92 
93 	/*
94 	 * Hook for calculating the pll's output frequency based on its passed
95 	 * in state.
96 	 */
97 	int (*get_freq)(struct intel_display *i915,
98 			const struct intel_dpll *pll,
99 			const struct intel_dpll_hw_state *dpll_hw_state);
100 };
101 
/*
 * Platform specific vtable for managing the pool of DPLLs, along with the
 * table describing the PLLs available on the platform.
 */
struct intel_dpll_mgr {
	/* NULL-terminated table of the platform's PLLs */
	const struct dpll_info *dpll_info;

	/* Compute the PLL state for @crtc/@encoder into the atomic state. */
	int (*compute_dplls)(struct intel_atomic_state *state,
			     struct intel_crtc *crtc,
			     struct intel_encoder *encoder);
	/* Reserve the PLL(s) needed by @crtc/@encoder in the atomic state. */
	int (*get_dplls)(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder);
	/* Release the PLL(s) previously reserved for @crtc. */
	void (*put_dplls)(struct intel_atomic_state *state,
			  struct intel_crtc *crtc);
	/* Update which reserved PLL is the active one for @crtc/@encoder. */
	void (*update_active_dpll)(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_encoder *encoder);
	/* Refresh the cached reference clocks (display->dpll.ref_clks). */
	void (*update_ref_clks)(struct intel_display *display);
	/* Dump @dpll_hw_state to @p for debugging. */
	void (*dump_hw_state)(struct drm_printer *p,
			      const struct intel_dpll_hw_state *dpll_hw_state);
	/* Compare two hw states for equivalence. */
	bool (*compare_hw_state)(const struct intel_dpll_hw_state *a,
				 const struct intel_dpll_hw_state *b);
};
122 
/*
 * Snapshot the committed state of all DPLLs into @dpll_state, so the
 * atomic state starts out mirroring the current software state.
 */
static void
intel_atomic_duplicate_dpll_state(struct intel_display *display,
				  struct intel_dpll_state *dpll_state)
{
	struct intel_dpll *pll;
	int i;

	/* Copy dpll state */
	for_each_dpll(display, pll, i)
		dpll_state[pll->index] = pll->state;
}
134 
135 static struct intel_dpll_state *
136 intel_atomic_get_dpll_state(struct drm_atomic_state *s)
137 {
138 	struct intel_atomic_state *state = to_intel_atomic_state(s);
139 	struct intel_display *display = to_intel_display(state);
140 
141 	drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
142 
143 	if (!state->dpll_set) {
144 		state->dpll_set = true;
145 
146 		intel_atomic_duplicate_dpll_state(display,
147 						  state->dpll_state);
148 	}
149 
150 	return state->dpll_state;
151 }
152 
153 /**
154  * intel_get_dpll_by_id - get a DPLL given its id
155  * @display: intel_display device instance
156  * @id: pll id
157  *
158  * Returns:
159  * A pointer to the DPLL with @id
160  */
161 struct intel_dpll *
162 intel_get_dpll_by_id(struct intel_display *display,
163 		     enum intel_dpll_id id)
164 {
165 	struct intel_dpll *pll;
166 	int i;
167 
168 	for_each_dpll(display, pll, i) {
169 		if (pll->info->id == id)
170 			return pll;
171 	}
172 
173 	MISSING_CASE(id);
174 	return NULL;
175 }
176 
/* For ILK+ */
/*
 * Read back @pll's hardware state and WARN if its enabled/disabled
 * status does not match the expected @state.
 */
void assert_dpll(struct intel_display *display,
		 struct intel_dpll *pll,
		 bool state)
{
	bool cur_state;
	struct intel_dpll_hw_state hw_state;

	/* Nothing to check against if there is no PLL at all. */
	if (drm_WARN(display->drm, !pll,
		     "asserting DPLL %s with no DPLL\n", str_on_off(state)))
		return;

	cur_state = intel_dpll_get_hw_state(display, pll, &hw_state);
	INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
				 "%s assertion failure (expected %s, current %s)\n",
				 pll->info->name, str_on_off(state),
				 str_on_off(cur_state));
}
195 
/* ICL+ TC PLL ids (MGPLL1..) map 1:1 onto TC ports (TC_PORT_1..). */
static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
{
	return TC_PORT_1 + id - DPLL_ID_ICL_MGPLL1;
}
200 
/* Inverse of icl_pll_id_to_tc_port(). */
enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
{
	return tc_port - TC_PORT_1 + DPLL_ID_ICL_MGPLL1;
}
205 
/*
 * Map an MTL DDI port to its PLL id: TC ports go through the TC port
 * mapping, combo ports A/B use DPLL0/DPLL1 respectively.
 */
enum intel_dpll_id mtl_port_to_pll_id(struct intel_display *display, enum port port)
{
	if (port >= PORT_TC1)
		return icl_tc_port_to_pll_id(intel_port_to_tc(display, port));

	switch (port) {
	case PORT_A:
		return DPLL_ID_ICL_DPLL0;
	case PORT_B:
		return DPLL_ID_ICL_DPLL1;
	default:
		MISSING_CASE(port);
		/* fall back to DPLL0 for unexpected ports */
		return DPLL_ID_ICL_DPLL0;
	}
}
221 
/*
 * Return the enable register for a combo PHY PLL. Note that on JSL/EHL
 * DPLL4 is enabled through what is otherwise the first MG PLL enable
 * register.
 */
static i915_reg_t
intel_combo_pll_enable_reg(struct intel_display *display,
			   struct intel_dpll *pll)
{
	if (display->platform.dg1)
		return DG1_DPLL_ENABLE(pll->info->id);
	else if ((display->platform.jasperlake || display->platform.elkhartlake) &&
		 (pll->info->id == DPLL_ID_EHL_DPLL4))
		return MG_PLL_ENABLE(0);

	return ICL_DPLL_ENABLE(pll->info->id);
}
234 
235 static i915_reg_t
236 intel_tc_pll_enable_reg(struct intel_display *display,
237 			struct intel_dpll *pll)
238 {
239 	const enum intel_dpll_id id = pll->info->id;
240 	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
241 
242 	if (display->platform.alderlake_p)
243 		return ADLP_PORTTC_PLL_ENABLE(tc_port);
244 
245 	return MG_PLL_ENABLE(tc_port);
246 }
247 
/*
 * Power up (if the PLL has a power domain) and enable @pll, programming
 * the hw with the state tracked in pll->state.hw_state.
 */
static void _intel_enable_shared_dpll(struct intel_display *display,
				      struct intel_dpll *pll)
{
	/* Grab the power domain reference before touching the PLL hw. */
	if (pll->info->power_domain)
		pll->wakeref = intel_display_power_get(display, pll->info->power_domain);

	pll->info->funcs->enable(display, pll, &pll->state.hw_state);
	pll->on = true;
}
257 
/*
 * Disable @pll and drop the power domain reference taken in
 * _intel_enable_shared_dpll().
 */
static void _intel_disable_shared_dpll(struct intel_display *display,
				       struct intel_dpll *pll)
{
	pll->info->funcs->disable(display, pll);
	pll->on = false;

	/* Power down only after the PLL hw has been disabled. */
	if (pll->info->power_domain)
		intel_display_power_put(display, pll->info->power_domain, pll->wakeref);
}
267 
/**
 * intel_dpll_enable - enable a CRTC's DPLL
 * @crtc_state: CRTC, and its state, which has a DPLL
 *
 * Enable DPLL used by @crtc.
 */
void intel_dpll_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_dpll *pll = crtc_state->intel_dpll;
	unsigned int pipe_mask = intel_crtc_joined_pipe_mask(crtc_state);
	unsigned int old_mask;

	if (drm_WARN_ON(display->drm, !pll))
		return;

	mutex_lock(&display->dpll.lock);
	old_mask = pll->active_mask;

	/* The pipes must hold a reference and must not already be active. */
	if (drm_WARN_ON(display->drm, !(pll->state.pipe_mask & pipe_mask)) ||
	    drm_WARN_ON(display->drm, pll->active_mask & pipe_mask))
		goto out;

	pll->active_mask |= pipe_mask;

	drm_dbg_kms(display->drm,
		    "enable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
		    pll->info->name, pll->active_mask, pll->on,
		    crtc->base.base.id, crtc->base.name);

	/* Already enabled on behalf of some other pipe(s); just verify. */
	if (old_mask) {
		drm_WARN_ON(display->drm, !pll->on);
		assert_dpll_enabled(display, pll);
		goto out;
	}
	drm_WARN_ON(display->drm, pll->on);

	drm_dbg_kms(display->drm, "enabling %s\n", pll->info->name);

	/* First active pipe: actually enable the PLL hardware. */
	_intel_enable_shared_dpll(display, pll);

out:
	mutex_unlock(&display->dpll.lock);
}
313 
314 /**
315  * intel_dpll_disable - disable a CRTC's shared DPLL
316  * @crtc_state: CRTC, and its state, which has a shared DPLL
317  *
318  * Disable DPLL used by @crtc.
319  */
320 void intel_dpll_disable(const struct intel_crtc_state *crtc_state)
321 {
322 	struct intel_display *display = to_intel_display(crtc_state);
323 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
324 	struct intel_dpll *pll = crtc_state->intel_dpll;
325 	unsigned int pipe_mask = intel_crtc_joined_pipe_mask(crtc_state);
326 
327 	/* PCH only available on ILK+ */
328 	if (DISPLAY_VER(display) < 5)
329 		return;
330 
331 	if (pll == NULL)
332 		return;
333 
334 	mutex_lock(&display->dpll.lock);
335 	if (drm_WARN(display->drm, !(pll->active_mask & pipe_mask),
336 		     "%s not used by [CRTC:%d:%s]\n", pll->info->name,
337 		     crtc->base.base.id, crtc->base.name))
338 		goto out;
339 
340 	drm_dbg_kms(display->drm,
341 		    "disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
342 		    pll->info->name, pll->active_mask, pll->on,
343 		    crtc->base.base.id, crtc->base.name);
344 
345 	assert_dpll_enabled(display, pll);
346 	drm_WARN_ON(display->drm, !pll->on);
347 
348 	pll->active_mask &= ~pipe_mask;
349 	if (pll->active_mask)
350 		goto out;
351 
352 	drm_dbg_kms(display->drm, "disabling %s\n", pll->info->name);
353 
354 	_intel_disable_shared_dpll(display, pll);
355 
356 out:
357 	mutex_unlock(&display->dpll.lock);
358 }
359 
360 static unsigned long
361 intel_dpll_mask_all(struct intel_display *display)
362 {
363 	struct intel_dpll *pll;
364 	unsigned long dpll_mask = 0;
365 	int i;
366 
367 	for_each_dpll(display, pll, i) {
368 		drm_WARN_ON(display->drm, dpll_mask & BIT(pll->info->id));
369 
370 		dpll_mask |= BIT(pll->info->id);
371 	}
372 
373 	return dpll_mask;
374 }
375 
/*
 * Find a PLL from @dpll_mask for @crtc: prefer sharing an already-used
 * PLL whose staged hw state matches @dpll_hw_state exactly, otherwise
 * fall back to the first completely unused PLL. Returns NULL if neither
 * is available.
 */
static struct intel_dpll *
intel_find_dpll(struct intel_atomic_state *state,
		const struct intel_crtc *crtc,
		const struct intel_dpll_hw_state *dpll_hw_state,
		unsigned long dpll_mask)
{
	struct intel_display *display = to_intel_display(crtc);
	unsigned long dpll_mask_all = intel_dpll_mask_all(display);
	struct intel_dpll_state *dpll_state;
	struct intel_dpll *unused_pll = NULL;
	enum intel_dpll_id id;

	dpll_state = intel_atomic_get_dpll_state(&state->base);

	/* The caller may only ask for PLLs that actually exist. */
	drm_WARN_ON(display->drm, dpll_mask & ~dpll_mask_all);

	for_each_set_bit(id, &dpll_mask, fls(dpll_mask_all)) {
		struct intel_dpll *pll;

		pll = intel_get_dpll_by_id(display, id);
		if (!pll)
			continue;

		/* Only want to check enabled timings first */
		if (dpll_state[pll->index].pipe_mask == 0) {
			/* Remember the first free PLL as a fallback. */
			if (!unused_pll)
				unused_pll = pll;
			continue;
		}

		if (memcmp(dpll_hw_state,
			   &dpll_state[pll->index].hw_state,
			   sizeof(*dpll_hw_state)) == 0) {
			drm_dbg_kms(display->drm,
				    "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
				    crtc->base.base.id, crtc->base.name,
				    pll->info->name,
				    dpll_state[pll->index].pipe_mask,
				    pll->active_mask);
			return pll;
		}
	}

	/* Ok no matching timings, maybe there's a free one? */
	if (unused_pll) {
		drm_dbg_kms(display->drm, "[CRTC:%d:%s] allocated %s\n",
			    crtc->base.base.id, crtc->base.name,
			    unused_pll->info->name);
		return unused_pll;
	}

	return NULL;
}
429 
/**
 * intel_dpll_crtc_get - Get a DPLL reference for a CRTC
 * @crtc: CRTC on which behalf the reference is taken
 * @pll: DPLL for which the reference is taken
 * @dpll_state: the DPLL atomic state in which the reference is tracked
 *
 * Take a reference for @pll tracking the use of it by @crtc.
 */
static void
intel_dpll_crtc_get(const struct intel_crtc *crtc,
		    const struct intel_dpll *pll,
		    struct intel_dpll_state *dpll_state)
{
	struct intel_display *display = to_intel_display(crtc);

	/* Taking the same reference twice indicates a bookkeeping bug. */
	drm_WARN_ON(display->drm, (dpll_state->pipe_mask & BIT(crtc->pipe)) != 0);

	dpll_state->pipe_mask |= BIT(crtc->pipe);

	drm_dbg_kms(display->drm, "[CRTC:%d:%s] reserving %s\n",
		    crtc->base.base.id, crtc->base.name, pll->info->name);
}
452 
/*
 * Reserve @pll for @crtc in the atomic state, copying in @dpll_hw_state
 * if this is the PLL's first user.
 */
static void
intel_reference_dpll(struct intel_atomic_state *state,
		     const struct intel_crtc *crtc,
		     const struct intel_dpll *pll,
		     const struct intel_dpll_hw_state *dpll_hw_state)
{
	struct intel_dpll_state *dpll_state;

	dpll_state = intel_atomic_get_dpll_state(&state->base);

	/* The first user defines the hw state that all sharers must match. */
	if (dpll_state[pll->index].pipe_mask == 0)
		dpll_state[pll->index].hw_state = *dpll_hw_state;

	intel_dpll_crtc_get(crtc, pll, &dpll_state[pll->index]);
}
468 
/**
 * intel_dpll_crtc_put - Drop a DPLL reference for a CRTC
 * @crtc: CRTC on which behalf the reference is dropped
 * @pll: DPLL for which the reference is dropped
 * @dpll_state: the DPLL atomic state in which the reference is tracked
 *
 * Drop a reference for @pll tracking the end of use of it by @crtc.
 */
void
intel_dpll_crtc_put(const struct intel_crtc *crtc,
		    const struct intel_dpll *pll,
		    struct intel_dpll_state *dpll_state)
{
	struct intel_display *display = to_intel_display(crtc);

	/* Dropping a reference we never took indicates a bookkeeping bug. */
	drm_WARN_ON(display->drm, (dpll_state->pipe_mask & BIT(crtc->pipe)) == 0);

	dpll_state->pipe_mask &= ~BIT(crtc->pipe);

	drm_dbg_kms(display->drm, "[CRTC:%d:%s] releasing %s\n",
		    crtc->base.base.id, crtc->base.name, pll->info->name);
}
491 
492 static void intel_unreference_dpll(struct intel_atomic_state *state,
493 				   const struct intel_crtc *crtc,
494 				   const struct intel_dpll *pll)
495 {
496 	struct intel_dpll_state *dpll_state;
497 
498 	dpll_state = intel_atomic_get_dpll_state(&state->base);
499 
500 	intel_dpll_crtc_put(crtc, pll, &dpll_state[pll->index]);
501 }
502 
/*
 * Release the PLL reference held by @crtc's old state and clear the PLL
 * from its new state.
 */
static void intel_put_dpll(struct intel_atomic_state *state,
			   struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	new_crtc_state->intel_dpll = NULL;

	/* Nothing to release if the old state had no PLL. */
	if (!old_crtc_state->intel_dpll)
		return;

	intel_unreference_dpll(state, crtc, old_crtc_state->intel_dpll);
}
518 
/**
 * intel_dpll_swap_state - make atomic DPLL configuration effective
 * @state: atomic state
 *
 * This is the dpll version of drm_atomic_helper_swap_state() since the
 * helper does not handle driver-specific global state.
 *
 * For consistency with atomic helpers this function does a complete swap,
 * i.e. it also puts the current state into @state, even though there is no
 * need for that at this moment.
 */
void intel_dpll_swap_state(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_dpll_state *dpll_state = state->dpll_state;
	struct intel_dpll *pll;
	int i;

	/* No DPLL state was staged in this atomic state; nothing to swap. */
	if (!state->dpll_set)
		return;

	for_each_dpll(display, pll, i)
		swap(pll->state, dpll_state[pll->index]);
}
543 
/*
 * Read back the PCH DPLL control and FP divider registers into
 * @dpll_hw_state. Returns whether the PLL's VCO is enabled.
 */
static bool ibx_pch_dpll_get_hw_state(struct intel_display *display,
				      struct intel_dpll *pll,
				      struct intel_dpll_hw_state *dpll_hw_state)
{
	struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
	const enum intel_dpll_id id = pll->info->id;
	struct ref_tracker *wakeref;
	u32 val;

	/* Bail (reporting "disabled") if the power well isn't on. */
	wakeref = intel_display_power_get_if_enabled(display,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(display, PCH_DPLL(id));
	hw_state->dpll = val;
	hw_state->fp0 = intel_de_read(display, PCH_FP0(id));
	hw_state->fp1 = intel_de_read(display, PCH_FP1(id));

	intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & DPLL_VCO_ENABLE;
}
567 
568 static void ibx_assert_pch_refclk_enabled(struct intel_display *display)
569 {
570 	u32 val;
571 	bool enabled;
572 
573 	val = intel_de_read(display, PCH_DREF_CONTROL);
574 	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
575 			    DREF_SUPERSPREAD_SOURCE_MASK));
576 	INTEL_DISPLAY_STATE_WARN(display, !enabled,
577 				 "PCH refclk assertion failure, should be active but is disabled\n");
578 }
579 
/*
 * Program the FP dividers and enable a PCH DPLL. The PCH reference
 * clock must already be running; the DPLL register is written twice
 * because the pixel multiplier only latches while the PLL is running.
 */
static void ibx_pch_dpll_enable(struct intel_display *display,
				struct intel_dpll *pll,
				const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
	const enum intel_dpll_id id = pll->info->id;

	/* PCH refclock must be enabled first */
	ibx_assert_pch_refclk_enabled(display);

	intel_de_write(display, PCH_FP0(id), hw_state->fp0);
	intel_de_write(display, PCH_FP1(id), hw_state->fp1);

	intel_de_write(display, PCH_DPLL(id), hw_state->dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(display, PCH_DPLL(id));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(display, PCH_DPLL(id), hw_state->dpll);
	intel_de_posting_read(display, PCH_DPLL(id));
	udelay(200);
}
608 
/* Disable a PCH DPLL by clearing its control register. */
static void ibx_pch_dpll_disable(struct intel_display *display,
				 struct intel_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(display, PCH_DPLL(id), 0);
	intel_de_posting_read(display, PCH_DPLL(id));
	udelay(200);
}
618 
/*
 * No PLL parameters to compute for PCH DPLLs at this stage;
 * crtc_state->dpll_hw_state is presumably populated elsewhere before
 * the PLL is reserved in ibx_get_dpll() -- the hook only reports success.
 */
static int ibx_compute_dpll(struct intel_atomic_state *state,
			    struct intel_crtc *crtc,
			    struct intel_encoder *encoder)
{
	return 0;
}
625 
626 static int ibx_get_dpll(struct intel_atomic_state *state,
627 			struct intel_crtc *crtc,
628 			struct intel_encoder *encoder)
629 {
630 	struct intel_display *display = to_intel_display(state);
631 	struct intel_crtc_state *crtc_state =
632 		intel_atomic_get_new_crtc_state(state, crtc);
633 	struct intel_dpll *pll;
634 	enum intel_dpll_id id;
635 
636 	if (HAS_PCH_IBX(display)) {
637 		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
638 		id = (enum intel_dpll_id) crtc->pipe;
639 		pll = intel_get_dpll_by_id(display, id);
640 
641 		drm_dbg_kms(display->drm,
642 			    "[CRTC:%d:%s] using pre-allocated %s\n",
643 			    crtc->base.base.id, crtc->base.name,
644 			    pll->info->name);
645 	} else {
646 		pll = intel_find_dpll(state, crtc,
647 				      &crtc_state->dpll_hw_state,
648 				      BIT(DPLL_ID_PCH_PLL_B) |
649 				      BIT(DPLL_ID_PCH_PLL_A));
650 	}
651 
652 	if (!pll)
653 		return -EINVAL;
654 
655 	/* reference the pll */
656 	intel_reference_dpll(state, crtc,
657 			     pll, &crtc_state->dpll_hw_state);
658 
659 	crtc_state->intel_dpll = pll;
660 
661 	return 0;
662 }
663 
664 static void ibx_dump_hw_state(struct drm_printer *p,
665 			      const struct intel_dpll_hw_state *dpll_hw_state)
666 {
667 	const struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
668 
669 	drm_printf(p, "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
670 		   "fp0: 0x%x, fp1: 0x%x\n",
671 		   hw_state->dpll,
672 		   hw_state->dpll_md,
673 		   hw_state->fp0,
674 		   hw_state->fp1);
675 }
676 
677 static bool ibx_compare_hw_state(const struct intel_dpll_hw_state *_a,
678 				 const struct intel_dpll_hw_state *_b)
679 {
680 	const struct i9xx_dpll_hw_state *a = &_a->i9xx;
681 	const struct i9xx_dpll_hw_state *b = &_b->i9xx;
682 
683 	return a->dpll == b->dpll &&
684 		a->dpll_md == b->dpll_md &&
685 		a->fp0 == b->fp0 &&
686 		a->fp1 == b->fp1;
687 }
688 
/* hw accessors shared by both PCH DPLLs */
static const struct intel_dpll_funcs ibx_pch_dpll_funcs = {
	.enable = ibx_pch_dpll_enable,
	.disable = ibx_pch_dpll_disable,
	.get_hw_state = ibx_pch_dpll_get_hw_state,
};
694 
/* The PCH provides two DPLLs; the table is zero-terminated. */
static const struct dpll_info pch_plls[] = {
	{ .name = "PCH DPLL A", .funcs = &ibx_pch_dpll_funcs, .id = DPLL_ID_PCH_PLL_A, },
	{ .name = "PCH DPLL B", .funcs = &ibx_pch_dpll_funcs, .id = DPLL_ID_PCH_PLL_B, },
	{}
};
700 
/* DPLL manager for platforms using the PCH DPLLs */
static const struct intel_dpll_mgr pch_pll_mgr = {
	.dpll_info = pch_plls,
	.compute_dplls = ibx_compute_dpll,
	.get_dplls = ibx_get_dpll,
	.put_dplls = intel_put_dpll,
	.dump_hw_state = ibx_dump_hw_state,
	.compare_hw_state = ibx_compare_hw_state,
};
709 
/* Program WRPLL_CTL to enable a HSW/BDW WRPLL, with a brief settle delay. */
static void hsw_ddi_wrpll_enable(struct intel_display *display,
				 struct intel_dpll *pll,
				 const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(display, WRPLL_CTL(id), hw_state->wrpll);
	intel_de_posting_read(display, WRPLL_CTL(id));
	udelay(20);
}
721 
/* Program SPLL_CTL to enable the HSW/BDW SPLL, with a brief settle delay. */
static void hsw_ddi_spll_enable(struct intel_display *display,
				struct intel_dpll *pll,
				const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;

	intel_de_write(display, SPLL_CTL, hw_state->spll);
	intel_de_posting_read(display, SPLL_CTL);
	udelay(20);
}
732 
/*
 * Disable a WRPLL, and reconfigure the PCH refclk if this PLL was
 * flagged as an SSC user.
 */
static void hsw_ddi_wrpll_disable(struct intel_display *display,
				  struct intel_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_rmw(display, WRPLL_CTL(id), WRPLL_PLL_ENABLE, 0);
	intel_de_posting_read(display, WRPLL_CTL(id));

	/*
	 * Try to set up the PCH reference clock once all DPLLs
	 * that depend on it have been shut down.
	 */
	if (display->dpll.pch_ssc_use & BIT(id))
		intel_init_pch_refclk(display);
}
748 
/*
 * Disable the SPLL, and reconfigure the PCH refclk if this PLL was
 * flagged as an SSC user.
 */
static void hsw_ddi_spll_disable(struct intel_display *display,
				 struct intel_dpll *pll)
{
	enum intel_dpll_id id = pll->info->id;

	intel_de_rmw(display, SPLL_CTL, SPLL_PLL_ENABLE, 0);
	intel_de_posting_read(display, SPLL_CTL);

	/*
	 * Try to set up the PCH reference clock once all DPLLs
	 * that depend on it have been shut down.
	 */
	if (display->dpll.pch_ssc_use & BIT(id))
		intel_init_pch_refclk(display);
}
764 
/*
 * Read back WRPLL_CTL under a display-core power reference.
 * Returns whether the WRPLL is enabled.
 */
static bool hsw_ddi_wrpll_get_hw_state(struct intel_display *display,
				       struct intel_dpll *pll,
				       struct intel_dpll_hw_state *dpll_hw_state)
{
	struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
	const enum intel_dpll_id id = pll->info->id;
	struct ref_tracker *wakeref;
	u32 val;

	/* Bail (reporting "disabled") if the power well isn't on. */
	wakeref = intel_display_power_get_if_enabled(display,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(display, WRPLL_CTL(id));
	hw_state->wrpll = val;

	intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & WRPLL_PLL_ENABLE;
}
786 
/*
 * Read back SPLL_CTL under a display-core power reference.
 * Returns whether the SPLL is enabled.
 */
static bool hsw_ddi_spll_get_hw_state(struct intel_display *display,
				      struct intel_dpll *pll,
				      struct intel_dpll_hw_state *dpll_hw_state)
{
	struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
	struct ref_tracker *wakeref;
	u32 val;

	/* Bail (reporting "disabled") if the power well isn't on. */
	wakeref = intel_display_power_get_if_enabled(display,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(display, SPLL_CTL);
	hw_state->spll = val;

	intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & SPLL_PLL_ENABLE;
}
807 
/* LCPLL reference frequency feeding the WRPLL -- presumably in MHz */
#define LC_FREQ 2700
/* LC_FREQ scaled to match freq2k's units (see hsw_ddi_calculate_wrpll()) */
#define LC_FREQ_2K U64_C(LC_FREQ * 2000)

/* WRPLL post divider (P) search range and step */
#define P_MIN 2
#define P_MAX 64
#define P_INC 2

/* Constraints for PLL good behavior */
#define REF_MIN 48
#define REF_MAX 400
#define VCO_MIN 2400
#define VCO_MAX 4800

/*
 * Candidate WRPLL dividers; n2/r2 hold doubled N/R values (see the
 * R2 = 2 * R, N2 = 2 * N substitution in hsw_ddi_calculate_wrpll()).
 */
struct hsw_wrpll_rnp {
	unsigned p, n2, r2;
};
824 
/*
 * Return the PPM error budget for the WRPLL divider search, keyed by
 * pixel clock in Hz. Clocks not listed in any table use a default
 * budget of 1000 ppm.
 */
static unsigned hsw_wrpll_get_budget_for_freq(int clock)
{
	static const int clocks_0[] = {
		25175000, 25200000, 27000000, 27027000, 37762500,
		37800000, 40500000, 40541000, 54000000, 54054000,
		59341000, 59400000, 72000000, 74176000, 74250000,
		81000000, 81081000, 89012000, 89100000, 108000000,
		108108000, 111264000, 111375000, 148352000, 148500000,
		162000000, 162162000, 222525000, 222750000, 296703000,
		297000000,
	};
	static const int clocks_1500[] = {
		233500000, 245250000, 247750000, 253250000, 298000000,
	};
	static const int clocks_2000[] = {
		169128000, 169500000, 179500000, 202000000,
	};
	static const int clocks_4000[] = {
		256250000, 262500000, 270000000, 272500000, 273750000,
		280750000, 281250000, 286000000, 291750000,
	};
	static const int clocks_5000[] = {
		267250000, 268500000,
	};
	static const struct {
		const int *clocks;
		unsigned int num_clocks;
		unsigned int budget;
	} tables[] = {
		{ clocks_0, sizeof(clocks_0) / sizeof(clocks_0[0]), 0 },
		{ clocks_1500, sizeof(clocks_1500) / sizeof(clocks_1500[0]), 1500 },
		{ clocks_2000, sizeof(clocks_2000) / sizeof(clocks_2000[0]), 2000 },
		{ clocks_4000, sizeof(clocks_4000) / sizeof(clocks_4000[0]), 4000 },
		{ clocks_5000, sizeof(clocks_5000) / sizeof(clocks_5000[0]), 5000 },
	};
	unsigned int i, j;

	for (i = 0; i < sizeof(tables) / sizeof(tables[0]); i++) {
		for (j = 0; j < tables[i].num_clocks; j++) {
			if (tables[i].clocks[j] == clock)
				return tables[i].budget;
		}
	}

	/* default budget for clocks without a dedicated entry */
	return 1000;
}
888 
/*
 * Consider WRPLL divider candidate (r2, n2, p) for target @freq2k and
 * update @best if the candidate is preferable according to the PPM
 * @budget rules described below.
 */
static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
				 unsigned int r2, unsigned int n2,
				 unsigned int p,
				 struct hsw_wrpll_rnp *best)
{
	u64 a, b, c, d, diff, diff_best;

	/* No best (r,n,p) yet */
	if (best->p == 0) {
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
		return;
	}

	/*
	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
	 * freq2k.
	 *
	 * delta = 1e6 *
	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
	 *	   freq2k;
	 *
	 * and we would like delta <= budget.
	 *
	 * If the discrepancy is above the PPM-based budget, always prefer to
	 * improve upon the previous solution.  However, if you're within the
	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
	 */
	/* a/b: budget terms, c/d: error terms; candidate and best resp. */
	a = freq2k * budget * p * r2;
	b = freq2k * budget * best->p * best->r2;
	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
	diff_best = abs_diff(freq2k * best->p * best->r2,
			     LC_FREQ_2K * best->n2);
	c = 1000000 * diff;
	d = 1000000 * diff_best;

	if (a < c && b < d) {
		/* If both are above the budget, pick the closer */
		if (best->p * best->r2 * diff < p * r2 * diff_best) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	} else if (a >= c && b < d) {
		/* If A is below the threshold but B is above it?  Update. */
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
	} else if (a >= c && b >= d) {
		/* Both are below the limit, so pick the higher n2/(r2*r2) */
		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	}
	/* Otherwise a < c && b >= d, do nothing */
}
948 
/*
 * Exhaustively search the (r2, n2, p) WRPLL divider space for @clock,
 * keeping the best candidate per hsw_wrpll_update_rnp()'s budget rules.
 * The doubled values (r2 = 2*r, n2 = 2*n) keep the arithmetic integral.
 */
static void
hsw_ddi_calculate_wrpll(int clock /* in Hz */,
			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
{
	u64 freq2k;
	unsigned p, n2, r2;
	struct hsw_wrpll_rnp best = {};
	unsigned budget;

	freq2k = clock / 100;

	budget = hsw_wrpll_get_budget_for_freq(clock);

	/* Special case handling for 540 pixel clock: bypass WR PLL entirely
	 * and directly pass the LC PLL to it. */
	if (freq2k == 5400000) {
		*n2_out = 2;
		*p_out = 1;
		*r2_out = 2;
		return;
	}

	/*
	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
	 * the WR PLL.
	 *
	 * We want R so that REF_MIN <= Ref <= REF_MAX.
	 * Injecting R2 = 2 * R gives:
	 *   REF_MAX * r2 > LC_FREQ * 2 and
	 *   REF_MIN * r2 < LC_FREQ * 2
	 *
	 * Which means the desired boundaries for r2 are:
	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
	 *
	 */
	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
	     r2 <= LC_FREQ * 2 / REF_MIN;
	     r2++) {

		/*
		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
		 *
		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
		 *   VCO_MAX * r2 > n2 * LC_FREQ and
		 *   VCO_MIN * r2 < n2 * LC_FREQ)
		 *
		 * Which means the desired boundaries for n2 are:
		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
		 */
		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
		     n2 <= VCO_MAX * r2 / LC_FREQ;
		     n2++) {

			for (p = P_MIN; p <= P_MAX; p += P_INC)
				hsw_wrpll_update_rnp(freq2k, budget,
						     r2, n2, p, &best);
		}
	}

	*n2_out = best.n2;
	*p_out = best.p;
	*r2_out = best.r2;
}
1013 
/*
 * Calculate the WRPLL output frequency (in kHz) from its control register
 * value, selecting the reference clock based on the WRPLL_REF field.
 */
static int hsw_ddi_wrpll_get_freq(struct intel_display *display,
				  const struct intel_dpll *pll,
				  const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
	int refclk;
	int n, p, r;
	u32 wrpll = hw_state->wrpll;

	switch (wrpll & WRPLL_REF_MASK) {
	case WRPLL_REF_SPECIAL_HSW:
		/* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
		if (display->platform.haswell && !display->platform.haswell_ult) {
			refclk = display->dpll.ref_clks.nssc;
			break;
		}
		fallthrough;
	case WRPLL_REF_PCH_SSC:
		/*
		 * We could calculate spread here, but our checking
		 * code only cares about 5% accuracy, and spread is a max of
		 * 0.5% downspread.
		 */
		refclk = display->dpll.ref_clks.ssc;
		break;
	case WRPLL_REF_LCPLL:
		refclk = 2700000;
		break;
	default:
		MISSING_CASE(wrpll);
		return 0;
	}

	r = wrpll & WRPLL_DIVIDER_REF_MASK;
	p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
	n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;

	/* Convert to KHz, p & r have a fixed point portion */
	return (refclk * n / 10) / (p * r) * 2;
}
1054 
1055 static int
1056 hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state,
1057 			   struct intel_crtc *crtc)
1058 {
1059 	struct intel_display *display = to_intel_display(state);
1060 	struct intel_crtc_state *crtc_state =
1061 		intel_atomic_get_new_crtc_state(state, crtc);
1062 	struct hsw_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.hsw;
1063 	unsigned int p, n2, r2;
1064 
1065 	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
1066 
1067 	hw_state->wrpll =
1068 		WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
1069 		WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
1070 		WRPLL_DIVIDER_POST(p);
1071 
1072 	crtc_state->port_clock = hsw_ddi_wrpll_get_freq(display, NULL,
1073 							&crtc_state->dpll_hw_state);
1074 
1075 	return 0;
1076 }
1077 
1078 static struct intel_dpll *
1079 hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
1080 		       struct intel_crtc *crtc)
1081 {
1082 	struct intel_crtc_state *crtc_state =
1083 		intel_atomic_get_new_crtc_state(state, crtc);
1084 
1085 	return intel_find_dpll(state, crtc,
1086 				      &crtc_state->dpll_hw_state,
1087 				      BIT(DPLL_ID_WRPLL2) |
1088 				      BIT(DPLL_ID_WRPLL1));
1089 }
1090 
1091 static int
1092 hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state)
1093 {
1094 	struct intel_display *display = to_intel_display(crtc_state);
1095 	int clock = crtc_state->port_clock;
1096 
1097 	switch (clock / 2) {
1098 	case 81000:
1099 	case 135000:
1100 	case 270000:
1101 		return 0;
1102 	default:
1103 		drm_dbg_kms(display->drm, "Invalid clock for DP: %d\n",
1104 			    clock);
1105 		return -EINVAL;
1106 	}
1107 }
1108 
1109 static struct intel_dpll *
1110 hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
1111 {
1112 	struct intel_display *display = to_intel_display(crtc_state);
1113 	struct intel_dpll *pll;
1114 	enum intel_dpll_id pll_id;
1115 	int clock = crtc_state->port_clock;
1116 
1117 	switch (clock / 2) {
1118 	case 81000:
1119 		pll_id = DPLL_ID_LCPLL_810;
1120 		break;
1121 	case 135000:
1122 		pll_id = DPLL_ID_LCPLL_1350;
1123 		break;
1124 	case 270000:
1125 		pll_id = DPLL_ID_LCPLL_2700;
1126 		break;
1127 	default:
1128 		MISSING_CASE(clock / 2);
1129 		return NULL;
1130 	}
1131 
1132 	pll = intel_get_dpll_by_id(display, pll_id);
1133 
1134 	if (!pll)
1135 		return NULL;
1136 
1137 	return pll;
1138 }
1139 
1140 static int hsw_ddi_lcpll_get_freq(struct intel_display *display,
1141 				  const struct intel_dpll *pll,
1142 				  const struct intel_dpll_hw_state *dpll_hw_state)
1143 {
1144 	int link_clock = 0;
1145 
1146 	switch (pll->info->id) {
1147 	case DPLL_ID_LCPLL_810:
1148 		link_clock = 81000;
1149 		break;
1150 	case DPLL_ID_LCPLL_1350:
1151 		link_clock = 135000;
1152 		break;
1153 	case DPLL_ID_LCPLL_2700:
1154 		link_clock = 270000;
1155 		break;
1156 	default:
1157 		drm_WARN(display->drm, 1, "bad port clock sel\n");
1158 		break;
1159 	}
1160 
1161 	return link_clock * 2;
1162 }
1163 
1164 static int
1165 hsw_ddi_spll_compute_dpll(struct intel_atomic_state *state,
1166 			  struct intel_crtc *crtc)
1167 {
1168 	struct intel_crtc_state *crtc_state =
1169 		intel_atomic_get_new_crtc_state(state, crtc);
1170 	struct hsw_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.hsw;
1171 
1172 	if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
1173 		return -EINVAL;
1174 
1175 	hw_state->spll =
1176 		SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
1177 
1178 	return 0;
1179 }
1180 
1181 static struct intel_dpll *
1182 hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
1183 		      struct intel_crtc *crtc)
1184 {
1185 	struct intel_crtc_state *crtc_state =
1186 		intel_atomic_get_new_crtc_state(state, crtc);
1187 
1188 	return intel_find_dpll(state, crtc, &crtc_state->dpll_hw_state,
1189 				      BIT(DPLL_ID_SPLL));
1190 }
1191 
1192 static int hsw_ddi_spll_get_freq(struct intel_display *display,
1193 				 const struct intel_dpll *pll,
1194 				 const struct intel_dpll_hw_state *dpll_hw_state)
1195 {
1196 	const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
1197 	int link_clock = 0;
1198 
1199 	switch (hw_state->spll & SPLL_FREQ_MASK) {
1200 	case SPLL_FREQ_810MHz:
1201 		link_clock = 81000;
1202 		break;
1203 	case SPLL_FREQ_1350MHz:
1204 		link_clock = 135000;
1205 		break;
1206 	case SPLL_FREQ_2700MHz:
1207 		link_clock = 270000;
1208 		break;
1209 	default:
1210 		drm_WARN(display->drm, 1, "bad spll freq\n");
1211 		break;
1212 	}
1213 
1214 	return link_clock * 2;
1215 }
1216 
1217 static int hsw_compute_dpll(struct intel_atomic_state *state,
1218 			    struct intel_crtc *crtc,
1219 			    struct intel_encoder *encoder)
1220 {
1221 	struct intel_crtc_state *crtc_state =
1222 		intel_atomic_get_new_crtc_state(state, crtc);
1223 
1224 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1225 		return hsw_ddi_wrpll_compute_dpll(state, crtc);
1226 	else if (intel_crtc_has_dp_encoder(crtc_state))
1227 		return hsw_ddi_lcpll_compute_dpll(crtc_state);
1228 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1229 		return hsw_ddi_spll_compute_dpll(state, crtc);
1230 	else
1231 		return -EINVAL;
1232 }
1233 
1234 static int hsw_get_dpll(struct intel_atomic_state *state,
1235 			struct intel_crtc *crtc,
1236 			struct intel_encoder *encoder)
1237 {
1238 	struct intel_crtc_state *crtc_state =
1239 		intel_atomic_get_new_crtc_state(state, crtc);
1240 	struct intel_dpll *pll = NULL;
1241 
1242 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1243 		pll = hsw_ddi_wrpll_get_dpll(state, crtc);
1244 	else if (intel_crtc_has_dp_encoder(crtc_state))
1245 		pll = hsw_ddi_lcpll_get_dpll(crtc_state);
1246 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1247 		pll = hsw_ddi_spll_get_dpll(state, crtc);
1248 
1249 	if (!pll)
1250 		return -EINVAL;
1251 
1252 	intel_reference_dpll(state, crtc,
1253 			     pll, &crtc_state->dpll_hw_state);
1254 
1255 	crtc_state->intel_dpll = pll;
1256 
1257 	return 0;
1258 }
1259 
1260 static void hsw_update_dpll_ref_clks(struct intel_display *display)
1261 {
1262 	display->dpll.ref_clks.ssc = 135000;
1263 	/* Non-SSC is only used on non-ULT HSW. */
1264 	if (intel_de_read(display, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
1265 		display->dpll.ref_clks.nssc = 24000;
1266 	else
1267 		display->dpll.ref_clks.nssc = 135000;
1268 }
1269 
1270 static void hsw_dump_hw_state(struct drm_printer *p,
1271 			      const struct intel_dpll_hw_state *dpll_hw_state)
1272 {
1273 	const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
1274 
1275 	drm_printf(p, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
1276 		   hw_state->wrpll, hw_state->spll);
1277 }
1278 
1279 static bool hsw_compare_hw_state(const struct intel_dpll_hw_state *_a,
1280 				 const struct intel_dpll_hw_state *_b)
1281 {
1282 	const struct hsw_dpll_hw_state *a = &_a->hsw;
1283 	const struct hsw_dpll_hw_state *b = &_b->hsw;
1284 
1285 	return a->wrpll == b->wrpll &&
1286 		a->spll == b->spll;
1287 }
1288 
/* Operations for the two WRPLLs (HDMI outputs). */
static const struct intel_dpll_funcs hsw_ddi_wrpll_funcs = {
	.enable = hsw_ddi_wrpll_enable,
	.disable = hsw_ddi_wrpll_disable,
	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
	.get_freq = hsw_ddi_wrpll_get_freq,
};

/* Operations for the SPLL (analog/CRT output). */
static const struct intel_dpll_funcs hsw_ddi_spll_funcs = {
	.enable = hsw_ddi_spll_enable,
	.disable = hsw_ddi_spll_disable,
	.get_hw_state = hsw_ddi_spll_get_hw_state,
	.get_freq = hsw_ddi_spll_get_freq,
};
1302 
/*
 * The LCPLLs are marked always-on (see hsw_plls[]), so there is nothing
 * to do when a CRTC starts using one.
 */
static void hsw_ddi_lcpll_enable(struct intel_display *display,
				 struct intel_dpll *pll,
				 const struct intel_dpll_hw_state *hw_state)
{
}
1308 
/* Counterpart to hsw_ddi_lcpll_enable(): always-on PLL, nothing to do. */
static void hsw_ddi_lcpll_disable(struct intel_display *display,
				  struct intel_dpll *pll)
{
}
1313 
/*
 * Always report the LCPLL as enabled; no per-PLL register state is
 * tracked for it in dpll_hw_state.
 */
static bool hsw_ddi_lcpll_get_hw_state(struct intel_display *display,
				       struct intel_dpll *pll,
				       struct intel_dpll_hw_state *dpll_hw_state)
{
	return true;
}
1320 
/* Operations for the always-on LCPLL link rates. */
static const struct intel_dpll_funcs hsw_ddi_lcpll_funcs = {
	.enable = hsw_ddi_lcpll_enable,
	.disable = hsw_ddi_lcpll_disable,
	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
	.get_freq = hsw_ddi_lcpll_get_freq,
};

/* All PLLs available on HSW/BDW; the three LCPLL rates are always on. */
static const struct dpll_info hsw_plls[] = {
	{ .name = "WRPLL 1", .funcs = &hsw_ddi_wrpll_funcs, .id = DPLL_ID_WRPLL1, },
	{ .name = "WRPLL 2", .funcs = &hsw_ddi_wrpll_funcs, .id = DPLL_ID_WRPLL2, },
	{ .name = "SPLL", .funcs = &hsw_ddi_spll_funcs, .id = DPLL_ID_SPLL, },
	{ .name = "LCPLL 810", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_810,
	  .always_on = true, },
	{ .name = "LCPLL 1350", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_1350,
	  .always_on = true, },
	{ .name = "LCPLL 2700", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_2700,
	  .always_on = true, },
	{}
};

/* Top-level PLL manager for HSW/BDW. */
static const struct intel_dpll_mgr hsw_pll_mgr = {
	.dpll_info = hsw_plls,
	.compute_dplls = hsw_compute_dpll,
	.get_dplls = hsw_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = hsw_update_dpll_ref_clks,
	.dump_hw_state = hsw_dump_hw_state,
	.compare_hw_state = hsw_compare_hw_state,
};
1350 
/* Per-PLL register addresses on SKL: control plus the two HDMI config regs. */
struct skl_dpll_regs {
	i915_reg_t ctl, cfgcr1, cfgcr2;
};

/* this array is indexed by the *shared* pll id */
static const struct skl_dpll_regs skl_dpll_regs[4] = {
	{
		/* DPLL 0 */
		.ctl = LCPLL1_CTL,
		/* DPLL 0 doesn't support HDMI mode */
	},
	{
		/* DPLL 1 */
		.ctl = LCPLL2_CTL,
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
	},
	{
		/* DPLL 2 (control lives in the WRPLL_CTL registers) */
		.ctl = WRPLL_CTL(0),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
	},
	{
		/* DPLL 3 */
		.ctl = WRPLL_CTL(1),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
	},
};
1381 
/*
 * Program this PLL's slice of the shared DPLL_CTRL1 register: each PLL
 * owns a 6-bit field (HDMI mode, SSC, link rate) at bit offset id * 6.
 */
static void skl_ddi_pll_write_ctrl1(struct intel_display *display,
				    struct intel_dpll *pll,
				    const struct skl_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_rmw(display, DPLL_CTRL1,
		     DPLL_CTRL1_HDMI_MODE(id) |
		     DPLL_CTRL1_SSC(id) |
		     DPLL_CTRL1_LINK_RATE_MASK(id),
		     hw_state->ctrl1 << (id * 6));
	/* flush the write before touching other PLL registers */
	intel_de_posting_read(display, DPLL_CTRL1);
}
1395 
/*
 * Enable a SKL DPLL: program CTRL1 and the CFGCR registers first, then
 * set the enable bit and wait for the PLL to report lock.
 */
static void skl_ddi_pll_enable(struct intel_display *display,
			       struct intel_dpll *pll,
			       const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	skl_ddi_pll_write_ctrl1(display, pll, hw_state);

	/* HDMI-mode DCO/divider configuration, flushed before enabling */
	intel_de_write(display, regs[id].cfgcr1, hw_state->cfgcr1);
	intel_de_write(display, regs[id].cfgcr2, hw_state->cfgcr2);
	intel_de_posting_read(display, regs[id].cfgcr1);
	intel_de_posting_read(display, regs[id].cfgcr2);

	/* the enable bit is always bit 31 */
	intel_de_rmw(display, regs[id].ctl, 0, LCPLL_PLL_ENABLE);

	if (intel_de_wait_for_set_ms(display, DPLL_STATUS, DPLL_LOCK(id), 5))
		drm_err(display->drm, "DPLL %d not locked\n", id);
}
1417 
/*
 * DPLL0 is already running (it drives CDCLK), so "enabling" it only means
 * updating its link-rate selection in DPLL_CTRL1.
 */
static void skl_ddi_dpll0_enable(struct intel_display *display,
				 struct intel_dpll *pll,
				 const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;

	skl_ddi_pll_write_ctrl1(display, pll, hw_state);
}
1426 
/* Disable a SKL DPLL by clearing its enable bit. */
static void skl_ddi_pll_disable(struct intel_display *display,
				struct intel_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	/* the enable bit is always bit 31 */
	intel_de_rmw(display, regs[id].ctl, LCPLL_PLL_ENABLE, 0);
	intel_de_posting_read(display, regs[id].ctl);
}
1437 
/* DPLL0 must stay running since it drives CDCLK, so never turn it off. */
static void skl_ddi_dpll0_disable(struct intel_display *display,
				  struct intel_dpll *pll)
{
}
1442 
/*
 * Read back the current hardware state of a SKL DPLL.
 *
 * Returns false if display power is down or the PLL is disabled; on
 * success fills in ctrl1 (and cfgcr1/2 for HDMI mode) and returns true.
 */
static bool skl_ddi_pll_get_hw_state(struct intel_display *display,
				     struct intel_dpll *pll,
				     struct intel_dpll_hw_state *dpll_hw_state)
{
	struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	struct ref_tracker *wakeref;
	bool ret;
	u32 val;

	/* Bail out without touching registers if the power well is off. */
	wakeref = intel_display_power_get_if_enabled(display,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(display, regs[id].ctl);
	if (!(val & LCPLL_PLL_ENABLE))
		goto out;

	/* each PLL owns a 6-bit slice of the shared DPLL_CTRL1 register */
	val = intel_de_read(display, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	/* avoid reading back stale values if HDMI mode is not enabled */
	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
		hw_state->cfgcr1 = intel_de_read(display, regs[id].cfgcr1);
		hw_state->cfgcr2 = intel_de_read(display, regs[id].cfgcr2);
	}
	ret = true;

out:
	intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1480 
/*
 * Read back DPLL0's state: only the DPLL_CTRL1 slice matters since DPLL0
 * has no HDMI mode. Warns if DPLL0 is unexpectedly disabled, because it
 * must always run to drive CDCLK.
 */
static bool skl_ddi_dpll0_get_hw_state(struct intel_display *display,
				       struct intel_dpll *pll,
				       struct intel_dpll_hw_state *dpll_hw_state)
{
	struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	struct ref_tracker *wakeref;
	u32 val;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(display,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	/* DPLL0 is always enabled since it drives CDCLK */
	val = intel_de_read(display, regs[id].ctl);
	if (drm_WARN_ON(display->drm, !(val & LCPLL_PLL_ENABLE)))
		goto out;

	val = intel_de_read(display, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	ret = true;

out:
	intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1514 
/*
 * Best WRPLL divider candidate found so far during the search in
 * skl_ddi_calculate_wrpll(); min_deviation is seeded to U64_MAX.
 */
struct skl_wrpll_context {
	u64 min_deviation;		/* current minimal deviation */
	u64 central_freq;		/* chosen central freq */
	u64 dco_freq;			/* chosen dco freq */
	unsigned int p;			/* chosen divider */
};

/* DCO freq must be within +1%/-6%  of the DCO central freq */
/* (values are in units of 0.01%) */
#define SKL_DCO_MAX_PDEVIATION	100
#define SKL_DCO_MAX_NDEVIATION	600
1525 
1526 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1527 				  u64 central_freq,
1528 				  u64 dco_freq,
1529 				  unsigned int divider)
1530 {
1531 	u64 deviation;
1532 
1533 	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1534 			      central_freq);
1535 
1536 	/* positive deviation */
1537 	if (dco_freq >= central_freq) {
1538 		if (deviation < SKL_DCO_MAX_PDEVIATION &&
1539 		    deviation < ctx->min_deviation) {
1540 			ctx->min_deviation = deviation;
1541 			ctx->central_freq = central_freq;
1542 			ctx->dco_freq = dco_freq;
1543 			ctx->p = divider;
1544 		}
1545 	/* negative deviation */
1546 	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1547 		   deviation < ctx->min_deviation) {
1548 		ctx->min_deviation = deviation;
1549 		ctx->central_freq = central_freq;
1550 		ctx->dco_freq = dco_freq;
1551 		ctx->p = divider;
1552 	}
1553 }
1554 
/*
 * Split an overall divider @p into the hardware's three-stage form
 * p = p0 * p1 * p2. Outputs are left untouched when @p is not one of the
 * supported divider values (callers pre-initialize them).
 */
static void skl_wrpll_get_multipliers(unsigned int p,
				      unsigned int *p0 /* out */,
				      unsigned int *p1 /* out */,
				      unsigned int *p2 /* out */)
{
	if (p % 2 == 0) {
		unsigned int half = p / 2;

		switch (half) {
		case 1:
		case 2:
		case 3:
		case 5:
			*p0 = 2;
			*p1 = 1;
			*p2 = half;
			return;
		}

		if (half % 2 == 0) {
			*p0 = 2;
			*p1 = half / 2;
			*p2 = 2;
		} else if (half % 3 == 0) {
			*p0 = 3;
			*p1 = half / 3;
			*p2 = 2;
		} else if (half % 7 == 0) {
			*p0 = 7;
			*p1 = half / 7;
			*p2 = 2;
		}
		return;
	}

	/* odd dividers: 3, 5, 7, 9, 15, 21, 35 */
	switch (p) {
	case 3:
	case 9:
		*p0 = 3;
		*p1 = 1;
		*p2 = p / 3;
		break;
	case 5:
	case 7:
		*p0 = p;
		*p1 = 1;
		*p2 = 1;
		break;
	case 15:
		*p0 = 3;
		*p1 = 1;
		*p2 = 5;
		break;
	case 21:
		*p0 = 7;
		*p1 = 1;
		*p2 = 3;
		break;
	case 35:
		*p0 = 7;
		*p1 = 1;
		*p2 = 5;
		break;
	}
}
1603 
/*
 * WRPLL configuration in the encoded form expected by the DPLL_CFGCR
 * registers (see skl_wrpll_params_populate() for the value encodings).
 */
struct skl_wrpll_params {
	u32 dco_fraction;	/* 15-bit fraction, in 1/0x8000 MHz units */
	u32 dco_integer;	/* DCO frequency / reference, integer part */
	u32 qdiv_ratio;		/* p1 */
	u32 qdiv_mode;		/* 1 if qdiv_ratio != 1 */
	u32 kdiv;		/* encoded p2 */
	u32 pdiv;		/* encoded p0 */
	u32 central_freq;	/* encoded DCO central frequency */
};
1613 
/*
 * Translate the chosen central frequency and p0/p1/p2 dividers into the
 * encoded register field values, and compute the DCO integer/fraction
 * from the AFE clock.
 */
static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
				      u64 afe_clock,
				      int ref_clock,
				      u64 central_freq,
				      u32 p0, u32 p1, u32 p2)
{
	u64 dco_freq;

	/* encode the DCO central frequency (Hz) as the register field value */
	switch (central_freq) {
	case 9600000000ULL:
		params->central_freq = 0;
		break;
	case 9000000000ULL:
		params->central_freq = 1;
		break;
	case 8400000000ULL:
		params->central_freq = 3;
	}

	/* encode p0 as the PDIV field value */
	switch (p0) {
	case 1:
		params->pdiv = 0;
		break;
	case 2:
		params->pdiv = 1;
		break;
	case 3:
		params->pdiv = 2;
		break;
	case 7:
		params->pdiv = 4;
		break;
	default:
		WARN(1, "Incorrect PDiv\n");
	}

	/* encode p2 as the KDIV field value */
	switch (p2) {
	case 5:
		params->kdiv = 0;
		break;
	case 2:
		params->kdiv = 1;
		break;
	case 3:
		params->kdiv = 2;
		break;
	case 1:
		params->kdiv = 3;
		break;
	default:
		WARN(1, "Incorrect KDiv\n");
	}

	params->qdiv_ratio = p1;
	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;

	/* DCO = AFE clock multiplied back up by the full divider */
	dco_freq = p0 * p1 * p2 * afe_clock;

	/*
	 * Intermediate values are in Hz.
	 * Divide by MHz to match bsepc
	 */
	params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
	/* fraction of the DCO/ref ratio, scaled by 0x8000 */
	params->dco_fraction =
		div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
}
1681 
/*
 * Search for WRPLL dividers producing a DCO frequency as close as
 * possible to one of the three central frequencies, for the given pixel
 * clock (kHz). Even dividers are tried first and preferred; odd dividers
 * are only used if no even divider fits. Returns 0 on success and fills
 * in @wrpll_params, or -EINVAL if no divider satisfies the DCO deviation
 * limits.
 */
static int
skl_ddi_calculate_wrpll(int clock,
			int ref_clock,
			struct skl_wrpll_params *wrpll_params)
{
	static const u64 dco_central_freq[3] = { 8400000000ULL,
						 9000000000ULL,
						 9600000000ULL };
	static const u8 even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
					    24, 28, 30, 32, 36, 40, 42, 44,
					    48, 52, 54, 56, 60, 64, 66, 68,
					    70, 72, 76, 78, 80, 84, 88, 90,
					    92, 96, 98 };
	static const u8 odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
	static const struct {
		const u8 *list;
		int n_dividers;
	} dividers[] = {
		{ even_dividers, ARRAY_SIZE(even_dividers) },
		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
	};
	struct skl_wrpll_context ctx = {
		.min_deviation = U64_MAX,
	};
	unsigned int dco, d, i;
	unsigned int p0, p1, p2;
	u64 afe_clock = (u64)clock * 1000 * 5; /* AFE Clock is 5x Pixel clock, in Hz */

	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
			for (i = 0; i < dividers[d].n_dividers; i++) {
				unsigned int p = dividers[d].list[i];
				u64 dco_freq = p * afe_clock;

				skl_wrpll_try_divider(&ctx,
						      dco_central_freq[dco],
						      dco_freq,
						      p);
				/*
				 * Skip the remaining dividers if we're sure to
				 * have found the definitive divider, we can't
				 * improve a 0 deviation.
				 */
				if (ctx.min_deviation == 0)
					goto skip_remaining_dividers;
			}
		}

skip_remaining_dividers:
		/*
		 * If a solution is found with an even divider, prefer
		 * this one.
		 */
		if (d == 0 && ctx.p)
			break;
	}

	/* ctx.p stays 0 if no divider met the deviation limits */
	if (!ctx.p)
		return -EINVAL;

	/*
	 * gcc incorrectly analyses that these can be used without being
	 * initialized. To be fair, it's hard to guess.
	 */
	p0 = p1 = p2 = 0;
	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
	skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
				  ctx.central_freq, p0, p1, p2);

	return 0;
}
1753 
/*
 * Decode the CFGCR1/CFGCR2 register values back into the resulting port
 * clock (kHz): reconstruct the DCO frequency from its integer/fraction
 * fields and divide by p0 * p1 * p2 * 5 (AFE clock is 5x pixel clock).
 * Returns 0 for invalid divider encodings.
 */
static int skl_ddi_wrpll_get_freq(struct intel_display *display,
				  const struct intel_dpll *pll,
				  const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
	int ref_clock = display->dpll.ref_clks.nssc;
	u32 p0, p1, p2, dco_freq;

	p0 = hw_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
	p2 = hw_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;

	/* qdiv ratio only applies when qdiv mode is enabled */
	if (hw_state->cfgcr2 &  DPLL_CFGCR2_QDIV_MODE(1))
		p1 = (hw_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
	else
		p1 = 1;


	/* decode the PDIV field into the actual p0 divider */
	switch (p0) {
	case DPLL_CFGCR2_PDIV_1:
		p0 = 1;
		break;
	case DPLL_CFGCR2_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR2_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR2_PDIV_7_INVALID:
		/*
		 * Incorrect ASUS-Z170M BIOS setting, the HW seems to ignore bit#0,
		 * handling it the same way as PDIV_7.
		 */
		drm_dbg_kms(display->drm, "Invalid WRPLL PDIV divider value, fixing it.\n");
		fallthrough;
	case DPLL_CFGCR2_PDIV_7:
		p0 = 7;
		break;
	default:
		MISSING_CASE(p0);
		return 0;
	}

	/* decode the KDIV field into the actual p2 divider */
	switch (p2) {
	case DPLL_CFGCR2_KDIV_5:
		p2 = 5;
		break;
	case DPLL_CFGCR2_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR2_KDIV_3:
		p2 = 3;
		break;
	case DPLL_CFGCR2_KDIV_1:
		p2 = 1;
		break;
	default:
		MISSING_CASE(p2);
		return 0;
	}

	/* DCO = integer part * ref + 15-bit fraction * ref / 0x8000 */
	dco_freq = (hw_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
		   ref_clock;

	dco_freq += ((hw_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
		    ref_clock / 0x8000;

	/* guard the division below against a zero divider */
	if (drm_WARN_ON(display->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	return dco_freq / (p0 * p1 * p2 * 5);
}
1825 
/*
 * Compute the WRPLL configuration for an HDMI output, pack it into the
 * ctrl1/cfgcr1/cfgcr2 fields, and refine port_clock to the exact rate
 * the hardware will produce. Returns -EINVAL if no divider fits.
 */
static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct skl_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.skl;
	struct skl_wrpll_params wrpll_params = {};
	int ret;

	ret = skl_ddi_calculate_wrpll(crtc_state->port_clock,
				      display->dpll.ref_clks.nssc, &wrpll_params);
	if (ret)
		return ret;

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	hw_state->ctrl1 =
		DPLL_CTRL1_OVERRIDE(0) |
		DPLL_CTRL1_HDMI_MODE(0);

	hw_state->cfgcr1 =
		DPLL_CFGCR1_FREQ_ENABLE |
		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
		wrpll_params.dco_integer;

	hw_state->cfgcr2 =
		DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
		wrpll_params.central_freq;

	/* read back the exact rate the chosen configuration produces */
	crtc_state->port_clock = skl_ddi_wrpll_get_freq(display, NULL,
							&crtc_state->dpll_hw_state);

	return 0;
}
1863 
1864 static int
1865 skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1866 {
1867 	struct skl_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.skl;
1868 	u32 ctrl1;
1869 
1870 	/*
1871 	 * See comment in intel_dpll_hw_state to understand why we always use 0
1872 	 * as the DPLL id in this function.
1873 	 */
1874 	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1875 	switch (crtc_state->port_clock / 2) {
1876 	case 81000:
1877 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
1878 		break;
1879 	case 135000:
1880 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
1881 		break;
1882 	case 270000:
1883 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
1884 		break;
1885 		/* eDP 1.4 rates */
1886 	case 162000:
1887 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
1888 		break;
1889 	case 108000:
1890 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
1891 		break;
1892 	case 216000:
1893 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
1894 		break;
1895 	}
1896 
1897 	hw_state->ctrl1 = ctrl1;
1898 
1899 	return 0;
1900 }
1901 
1902 static int skl_ddi_lcpll_get_freq(struct intel_display *display,
1903 				  const struct intel_dpll *pll,
1904 				  const struct intel_dpll_hw_state *dpll_hw_state)
1905 {
1906 	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1907 	int link_clock = 0;
1908 
1909 	switch ((hw_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
1910 		DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
1911 	case DPLL_CTRL1_LINK_RATE_810:
1912 		link_clock = 81000;
1913 		break;
1914 	case DPLL_CTRL1_LINK_RATE_1080:
1915 		link_clock = 108000;
1916 		break;
1917 	case DPLL_CTRL1_LINK_RATE_1350:
1918 		link_clock = 135000;
1919 		break;
1920 	case DPLL_CTRL1_LINK_RATE_1620:
1921 		link_clock = 162000;
1922 		break;
1923 	case DPLL_CTRL1_LINK_RATE_2160:
1924 		link_clock = 216000;
1925 		break;
1926 	case DPLL_CTRL1_LINK_RATE_2700:
1927 		link_clock = 270000;
1928 		break;
1929 	default:
1930 		drm_WARN(display->drm, 1, "Unsupported link rate\n");
1931 		break;
1932 	}
1933 
1934 	return link_clock * 2;
1935 }
1936 
1937 static int skl_compute_dpll(struct intel_atomic_state *state,
1938 			    struct intel_crtc *crtc,
1939 			    struct intel_encoder *encoder)
1940 {
1941 	struct intel_crtc_state *crtc_state =
1942 		intel_atomic_get_new_crtc_state(state, crtc);
1943 
1944 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1945 		return skl_ddi_hdmi_pll_dividers(crtc_state);
1946 	else if (intel_crtc_has_dp_encoder(crtc_state))
1947 		return skl_ddi_dp_set_dpll_hw_state(crtc_state);
1948 	else
1949 		return -EINVAL;
1950 }
1951 
1952 static int skl_get_dpll(struct intel_atomic_state *state,
1953 			struct intel_crtc *crtc,
1954 			struct intel_encoder *encoder)
1955 {
1956 	struct intel_crtc_state *crtc_state =
1957 		intel_atomic_get_new_crtc_state(state, crtc);
1958 	struct intel_dpll *pll;
1959 
1960 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1961 		pll = intel_find_dpll(state, crtc,
1962 				      &crtc_state->dpll_hw_state,
1963 				      BIT(DPLL_ID_SKL_DPLL0));
1964 	else
1965 		pll = intel_find_dpll(state, crtc,
1966 				      &crtc_state->dpll_hw_state,
1967 				      BIT(DPLL_ID_SKL_DPLL3) |
1968 				      BIT(DPLL_ID_SKL_DPLL2) |
1969 				      BIT(DPLL_ID_SKL_DPLL1));
1970 	if (!pll)
1971 		return -EINVAL;
1972 
1973 	intel_reference_dpll(state, crtc,
1974 			     pll, &crtc_state->dpll_hw_state);
1975 
1976 	crtc_state->intel_dpll = pll;
1977 
1978 	return 0;
1979 }
1980 
1981 static int skl_ddi_pll_get_freq(struct intel_display *display,
1982 				const struct intel_dpll *pll,
1983 				const struct intel_dpll_hw_state *dpll_hw_state)
1984 {
1985 	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1986 
1987 	/*
1988 	 * ctrl1 register is already shifted for each pll, just use 0 to get
1989 	 * the internal shift for each field
1990 	 */
1991 	if (hw_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
1992 		return skl_ddi_wrpll_get_freq(display, pll, dpll_hw_state);
1993 	else
1994 		return skl_ddi_lcpll_get_freq(display, pll, dpll_hw_state);
1995 }
1996 
/* On SKL the DPLL reference is the CDCLK reference clock. */
static void skl_update_dpll_ref_clks(struct intel_display *display)
{
	/* No SSC ref */
	display->dpll.ref_clks.nssc = display->cdclk.hw.ref;
}
2002 
2003 static void skl_dump_hw_state(struct drm_printer *p,
2004 			      const struct intel_dpll_hw_state *dpll_hw_state)
2005 {
2006 	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
2007 
2008 	drm_printf(p, "dpll_hw_state: ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
2009 		   hw_state->ctrl1, hw_state->cfgcr1, hw_state->cfgcr2);
2010 }
2011 
2012 static bool skl_compare_hw_state(const struct intel_dpll_hw_state *_a,
2013 				 const struct intel_dpll_hw_state *_b)
2014 {
2015 	const struct skl_dpll_hw_state *a = &_a->skl;
2016 	const struct skl_dpll_hw_state *b = &_b->skl;
2017 
2018 	return a->ctrl1 == b->ctrl1 &&
2019 		a->cfgcr1 == b->cfgcr1 &&
2020 		a->cfgcr2 == b->cfgcr2;
2021 }
2022 
/* Operations for the freely-allocatable DPLL1-3. */
static const struct intel_dpll_funcs skl_ddi_pll_funcs = {
	.enable = skl_ddi_pll_enable,
	.disable = skl_ddi_pll_disable,
	.get_hw_state = skl_ddi_pll_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};

/* Operations for DPLL0, which is always on (it drives CDCLK). */
static const struct intel_dpll_funcs skl_ddi_dpll0_funcs = {
	.enable = skl_ddi_dpll0_enable,
	.disable = skl_ddi_dpll0_disable,
	.get_hw_state = skl_ddi_dpll0_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};

/* All PLLs available on SKL. */
static const struct dpll_info skl_plls[] = {
	{ .name = "DPLL 0", .funcs = &skl_ddi_dpll0_funcs, .id = DPLL_ID_SKL_DPLL0,
	  .always_on = true, },
	{ .name = "DPLL 1", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL1, },
	{ .name = "DPLL 2", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL2, },
	{ .name = "DPLL 3", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL3, },
	{}
};

/* Top-level PLL manager for SKL. */
static const struct intel_dpll_mgr skl_pll_mgr = {
	.dpll_info = skl_plls,
	.compute_dplls = skl_compute_dpll,
	.get_dplls = skl_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = skl_update_dpll_ref_clks,
	.dump_hw_state = skl_dump_hw_state,
	.compare_hw_state = skl_compare_hw_state,
};
2055 
/*
 * Program and enable a BXT/GLK port PLL.
 *
 * The divider and coefficient values come from @dpll_hw_state->bxt. On GLK
 * the PLL power well is brought up first and a DCC delay range tweak is
 * applied after lock. Register writes follow a fixed order ending with the
 * enable + lock poll; don't reorder them.
 */
static void bxt_ddi_pll_enable(struct intel_display *display,
			       struct intel_dpll *pll,
			       const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	enum dpio_phy phy = DPIO_PHY0;
	enum dpio_channel ch = DPIO_CH0;
	u32 temp;
	int ret;

	bxt_port_to_phy_channel(display, port, &phy, &ch);

	/* Non-SSC reference */
	intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_REF_SEL);

	if (display->platform.geminilake) {
		/* GLK needs the PLL power well up before programming. */
		intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port),
			     0, PORT_PLL_POWER_ENABLE);

		ret = intel_de_wait_for_set_us(display,
					       BXT_PORT_PLL_ENABLE(port),
					       PORT_PLL_POWER_STATE, 200);
		if (ret)
			drm_err(display->drm,
				"Power state not set for PLL:%d\n", port);
	}

	/* Disable 10 bit clock */
	intel_de_rmw(display, BXT_PORT_PLL_EBB_4(phy, ch),
		     PORT_PLL_10BIT_CLK_ENABLE, 0);

	/* Write P1 & P2 */
	intel_de_rmw(display, BXT_PORT_PLL_EBB_0(phy, ch),
		     PORT_PLL_P1_MASK | PORT_PLL_P2_MASK, hw_state->ebb0);

	/* Write M2 integer */
	intel_de_rmw(display, BXT_PORT_PLL(phy, ch, 0),
		     PORT_PLL_M2_INT_MASK, hw_state->pll0);

	/* Write N */
	intel_de_rmw(display, BXT_PORT_PLL(phy, ch, 1),
		     PORT_PLL_N_MASK, hw_state->pll1);

	/* Write M2 fraction */
	intel_de_rmw(display, BXT_PORT_PLL(phy, ch, 2),
		     PORT_PLL_M2_FRAC_MASK, hw_state->pll2);

	/* Write M2 fraction enable */
	intel_de_rmw(display, BXT_PORT_PLL(phy, ch, 3),
		     PORT_PLL_M2_FRAC_ENABLE, hw_state->pll3);

	/* Write coeff */
	temp = intel_de_read(display, BXT_PORT_PLL(phy, ch, 6));
	temp &= ~PORT_PLL_PROP_COEFF_MASK;
	temp &= ~PORT_PLL_INT_COEFF_MASK;
	temp &= ~PORT_PLL_GAIN_CTL_MASK;
	temp |= hw_state->pll6;
	intel_de_write(display, BXT_PORT_PLL(phy, ch, 6), temp);

	/* Write calibration val */
	intel_de_rmw(display, BXT_PORT_PLL(phy, ch, 8),
		     PORT_PLL_TARGET_CNT_MASK, hw_state->pll8);

	intel_de_rmw(display, BXT_PORT_PLL(phy, ch, 9),
		     PORT_PLL_LOCK_THRESHOLD_MASK, hw_state->pll9);

	temp = intel_de_read(display, BXT_PORT_PLL(phy, ch, 10));
	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
	temp &= ~PORT_PLL_DCO_AMP_MASK;
	temp |= hw_state->pll10;
	intel_de_write(display, BXT_PORT_PLL(phy, ch, 10), temp);

	/* Recalibrate with new settings */
	temp = intel_de_read(display, BXT_PORT_PLL_EBB_4(phy, ch));
	temp |= PORT_PLL_RECALIBRATE;
	intel_de_write(display, BXT_PORT_PLL_EBB_4(phy, ch), temp);
	/* Second write restores the 10 bit clock selection from the saved state. */
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	temp |= hw_state->ebb4;
	intel_de_write(display, BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Enable PLL */
	intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_ENABLE);
	intel_de_posting_read(display, BXT_PORT_PLL_ENABLE(port));

	ret = intel_de_wait_for_set_us(display, BXT_PORT_PLL_ENABLE(port),
				       PORT_PLL_LOCK, 200);
	if (ret)
		drm_err(display->drm, "PLL %d not locked\n", port);

	if (display->platform.geminilake) {
		temp = intel_de_read(display, BXT_PORT_TX_DW5_LN(phy, ch, 0));
		temp |= DCC_DELAY_RANGE_2;
		intel_de_write(display, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
	}

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers and we pick lanes 0/1 for that.
	 */
	temp = intel_de_read(display, BXT_PORT_PCS_DW12_LN01(phy, ch));
	temp &= ~LANE_STAGGER_MASK;
	temp &= ~LANESTAGGER_STRAP_OVRD;
	temp |= hw_state->pcsdw12;
	intel_de_write(display, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
}
2162 
/*
 * Disable a BXT/GLK port PLL; on GLK the PLL power well is taken down
 * afterwards and its power state is polled.
 */
static void bxt_ddi_pll_disable(struct intel_display *display,
				struct intel_dpll *pll)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	int ret;

	intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port), PORT_PLL_ENABLE, 0);
	intel_de_posting_read(display, BXT_PORT_PLL_ENABLE(port));

	if (display->platform.geminilake) {
		intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port),
			     PORT_PLL_POWER_ENABLE, 0);

		ret = intel_de_wait_for_clear_us(display,
						 BXT_PORT_PLL_ENABLE(port),
						 PORT_PLL_POWER_STATE, 200);
		if (ret)
			drm_err(display->drm,
				"Power state not reset for PLL:%d\n", port);
	}
}
2184 
/*
 * Read back the current hardware state of a BXT/GLK port PLL into
 * @dpll_hw_state->bxt.
 *
 * Returns true if the PLL is enabled (and the state was filled in),
 * false if it is disabled or the display core power domain could not
 * be acquired. Each field is masked down to the bits that
 * bxt_ddi_pll_enable() actually programs.
 */
static bool bxt_ddi_pll_get_hw_state(struct intel_display *display,
				     struct intel_dpll *pll,
				     struct intel_dpll_hw_state *dpll_hw_state)
{
	struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	struct ref_tracker *wakeref;
	enum dpio_phy phy;
	enum dpio_channel ch;
	u32 val;
	bool ret;

	bxt_port_to_phy_channel(display, port, &phy, &ch);

	wakeref = intel_display_power_get_if_enabled(display,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(display, BXT_PORT_PLL_ENABLE(port));
	if (!(val & PORT_PLL_ENABLE))
		goto out;

	hw_state->ebb0 = intel_de_read(display, BXT_PORT_PLL_EBB_0(phy, ch));
	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;

	hw_state->ebb4 = intel_de_read(display, BXT_PORT_PLL_EBB_4(phy, ch));
	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;

	hw_state->pll0 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 0));
	hw_state->pll0 &= PORT_PLL_M2_INT_MASK;

	hw_state->pll1 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 1));
	hw_state->pll1 &= PORT_PLL_N_MASK;

	hw_state->pll2 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 2));
	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;

	hw_state->pll3 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 3));
	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;

	hw_state->pll6 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 6));
	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
			  PORT_PLL_INT_COEFF_MASK |
			  PORT_PLL_GAIN_CTL_MASK;

	hw_state->pll8 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 8));
	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;

	hw_state->pll9 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 9));
	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;

	hw_state->pll10 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 10));
	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
			   PORT_PLL_DCO_AMP_MASK;

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers. We configure all lanes the same way, so
	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
	 */
	hw_state->pcsdw12 = intel_de_read(display,
					  BXT_PORT_PCS_DW12_LN01(phy, ch));
	if (intel_de_read(display, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
		drm_dbg(display->drm,
			"lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
			hw_state->pcsdw12,
			intel_de_read(display,
				      BXT_PORT_PCS_DW12_LN23(phy, ch)));
	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;

	ret = true;

out:
	intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
2265 
/*
 * Pre-calculated divider values for the standard DP link rates;
 * .dot is matched against crtc_state->port_clock.
 */
static const struct dpll bxt_dp_clk_val[] = {
	/* m2 is .22 binary fixed point */
	{ .dot = 162000, .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
	{ .dot = 270000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
	{ .dot = 540000, .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
	{ .dot = 216000, .p1 = 3, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
	{ .dot = 243000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6133333 /* 24.3 */ },
	{ .dot = 324000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
	{ .dot = 432000, .p1 = 3, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
};
2277 
/*
 * Compute the HDMI PLL dividers for @crtc_state via the generic best-dpll
 * search. Returns 0 on success, -EINVAL if no suitable dividers exist.
 * The BXT programming path assumes m1 == 2, hence the WARN.
 */
static int
bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
			  struct dpll *clk_div)
{
	struct intel_display *display = to_intel_display(crtc_state);

	/* Calculate HDMI div */
	/*
	 * FIXME: tie the following calculation into
	 * i9xx_crtc_compute_clock
	 */
	if (!bxt_find_best_dpll(crtc_state, clk_div))
		return -EINVAL;

	drm_WARN_ON(display->drm, clk_div->m1 != 2);

	return 0;
}
2296 
2297 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
2298 				    struct dpll *clk_div)
2299 {
2300 	struct intel_display *display = to_intel_display(crtc_state);
2301 	int i;
2302 
2303 	*clk_div = bxt_dp_clk_val[0];
2304 	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
2305 		if (crtc_state->port_clock == bxt_dp_clk_val[i].dot) {
2306 			*clk_div = bxt_dp_clk_val[i];
2307 			break;
2308 		}
2309 	}
2310 
2311 	chv_calc_dpll_params(display->dpll.ref_clks.nssc, clk_div);
2312 
2313 	drm_WARN_ON(display->drm, clk_div->vco == 0 ||
2314 		    clk_div->dot != crtc_state->port_clock);
2315 }
2316 
/*
 * Translate the computed clock dividers into BXT PLL register values in
 * crtc_state->dpll_hw_state.bxt.
 *
 * The loop-filter coefficients and calibration target count are selected
 * from the VCO frequency band; the lane stagger value from the port clock.
 * Returns 0 on success, -EINVAL for a VCO outside the supported bands.
 */
static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
				     const struct dpll *clk_div)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct bxt_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.bxt;
	int clock = crtc_state->port_clock;
	int vco = clk_div->vco;
	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
	u32 lanestagger;

	if (vco >= 6200000 && vco <= 6700000) {
		prop_coef = 4;
		int_coef = 9;
		gain_ctl = 3;
		targ_cnt = 8;
	} else if ((vco > 5400000 && vco < 6200000) ||
			(vco >= 4800000 && vco < 5400000)) {
		prop_coef = 5;
		int_coef = 11;
		gain_ctl = 3;
		targ_cnt = 9;
	} else if (vco == 5400000) {
		/* exactly 5.4 GHz is excluded from the band above on purpose */
		prop_coef = 3;
		int_coef = 8;
		gain_ctl = 1;
		targ_cnt = 9;
	} else {
		drm_err(display->drm, "Invalid VCO\n");
		return -EINVAL;
	}

	if (clock > 270000)
		lanestagger = 0x18;
	else if (clock > 135000)
		lanestagger = 0x0d;
	else if (clock > 67000)
		lanestagger = 0x07;
	else if (clock > 33000)
		lanestagger = 0x04;
	else
		lanestagger = 0x02;

	hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
	/* m2 is .22 binary fixed point: integer part above bit 22, fraction below */
	hw_state->pll0 = PORT_PLL_M2_INT(clk_div->m2 >> 22);
	hw_state->pll1 = PORT_PLL_N(clk_div->n);
	hw_state->pll2 = PORT_PLL_M2_FRAC(clk_div->m2 & 0x3fffff);

	if (clk_div->m2 & 0x3fffff)
		hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;

	hw_state->pll6 = PORT_PLL_PROP_COEFF(prop_coef) |
		PORT_PLL_INT_COEFF(int_coef) |
		PORT_PLL_GAIN_CTL(gain_ctl);

	hw_state->pll8 = PORT_PLL_TARGET_CNT(targ_cnt);

	hw_state->pll9 = PORT_PLL_LOCK_THRESHOLD(5);

	hw_state->pll10 = PORT_PLL_DCO_AMP(15) |
		PORT_PLL_DCO_AMP_OVR_EN_H;

	hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;

	hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;

	return 0;
}
2384 
/*
 * Recover the output frequency (in kHz) from BXT PLL register state by
 * decoding the divider fields and re-running the CHV clock calculation.
 */
static int bxt_ddi_pll_get_freq(struct intel_display *display,
				const struct intel_dpll *pll,
				const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
	struct dpll clock;

	clock.m1 = 2;
	/* m2 is .22 fixed point; the fraction only counts when its enable bit is set */
	clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, hw_state->pll0) << 22;
	if (hw_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
		clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK,
					  hw_state->pll2);
	clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, hw_state->pll1);
	clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, hw_state->ebb0);
	clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, hw_state->ebb0);

	return chv_calc_dpll_params(display->dpll.ref_clks.nssc, &clock);
}
2403 
/* Compute and store the BXT PLL state for a DP output. */
static int
bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
	struct dpll clk_div = {};

	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);

	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
}
2413 
/*
 * Compute and store the BXT PLL state for an HDMI output, then update
 * port_clock to the frequency the chosen PLL settings actually produce.
 */
static int
bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct dpll clk_div = {};
	int ret;

	bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);

	ret = bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
	if (ret)
		return ret;

	crtc_state->port_clock = bxt_ddi_pll_get_freq(display, NULL,
						      &crtc_state->dpll_hw_state);

	return 0;
}
2432 
2433 static int bxt_compute_dpll(struct intel_atomic_state *state,
2434 			    struct intel_crtc *crtc,
2435 			    struct intel_encoder *encoder)
2436 {
2437 	struct intel_crtc_state *crtc_state =
2438 		intel_atomic_get_new_crtc_state(state, crtc);
2439 
2440 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
2441 		return bxt_ddi_hdmi_set_dpll_hw_state(crtc_state);
2442 	else if (intel_crtc_has_dp_encoder(crtc_state))
2443 		return bxt_ddi_dp_set_dpll_hw_state(crtc_state);
2444 	else
2445 		return -EINVAL;
2446 }
2447 
/*
 * Assign the pre-allocated PLL for this CRTC. On BXT every port has its
 * own dedicated PLL, so no sharing logic is needed; always succeeds.
 */
static int bxt_get_dpll(struct intel_atomic_state *state,
			struct intel_crtc *crtc,
			struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_dpll *pll;
	enum intel_dpll_id id;

	/* 1:1 mapping between ports and PLLs */
	id = (enum intel_dpll_id) encoder->port;
	pll = intel_get_dpll_by_id(display, id);

	drm_dbg_kms(display->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
		    crtc->base.base.id, crtc->base.name, pll->info->name);

	intel_reference_dpll(state, crtc,
			     pll, &crtc_state->dpll_hw_state);

	crtc_state->intel_dpll = pll;

	return 0;
}
2472 
/* BXT port PLLs use a 100 MHz reference for both SSC and non-SSC. */
static void bxt_update_dpll_ref_clks(struct intel_display *display)
{
	display->dpll.ref_clks.ssc = 100000;
	display->dpll.ref_clks.nssc = 100000;
	/* DSI non-SSC ref 19.2MHz */
}
2479 
2480 static void bxt_dump_hw_state(struct drm_printer *p,
2481 			      const struct intel_dpll_hw_state *dpll_hw_state)
2482 {
2483 	const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
2484 
2485 	drm_printf(p, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
2486 		   "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
2487 		   "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
2488 		   hw_state->ebb0, hw_state->ebb4,
2489 		   hw_state->pll0, hw_state->pll1, hw_state->pll2, hw_state->pll3,
2490 		   hw_state->pll6, hw_state->pll8, hw_state->pll9, hw_state->pll10,
2491 		   hw_state->pcsdw12);
2492 }
2493 
2494 static bool bxt_compare_hw_state(const struct intel_dpll_hw_state *_a,
2495 				 const struct intel_dpll_hw_state *_b)
2496 {
2497 	const struct bxt_dpll_hw_state *a = &_a->bxt;
2498 	const struct bxt_dpll_hw_state *b = &_b->bxt;
2499 
2500 	return a->ebb0 == b->ebb0 &&
2501 		a->ebb4 == b->ebb4 &&
2502 		a->pll0 == b->pll0 &&
2503 		a->pll1 == b->pll1 &&
2504 		a->pll2 == b->pll2 &&
2505 		a->pll3 == b->pll3 &&
2506 		a->pll6 == b->pll6 &&
2507 		a->pll8 == b->pll8 &&
2508 		a->pll10 == b->pll10 &&
2509 		a->pcsdw12 == b->pcsdw12;
2510 }
2511 
/* Hooks shared by all three BXT port PLLs. */
static const struct intel_dpll_funcs bxt_ddi_pll_funcs = {
	.enable = bxt_ddi_pll_enable,
	.disable = bxt_ddi_pll_disable,
	.get_hw_state = bxt_ddi_pll_get_hw_state,
	.get_freq = bxt_ddi_pll_get_freq,
};
2518 
/* BXT PLL descriptors: one dedicated PLL per port A/B/C. */
static const struct dpll_info bxt_plls[] = {
	{ .name = "PORT PLL A", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL0, },
	{ .name = "PORT PLL B", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL1, },
	{ .name = "PORT PLL C", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL2, },
	{}
};
2525 
/* Top-level DPLL manager for the BXT/GLK platform family. */
static const struct intel_dpll_mgr bxt_pll_mgr = {
	.dpll_info = bxt_plls,
	.compute_dplls = bxt_compute_dpll,
	.get_dplls = bxt_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = bxt_update_dpll_ref_clks,
	.dump_hw_state = bxt_dump_hw_state,
	.compare_hw_state = bxt_compare_hw_state,
};
2535 
/*
 * Decompose the overall WRPLL divider into the P, Q and K stage values
 * (bestdiv == pdiv * qdiv * kdiv for every divider in the search table).
 * Outputs are left untouched for dividers not in the table.
 */
static void icl_wrpll_get_multipliers(int bestdiv, int *pdiv,
				      int *qdiv, int *kdiv)
{
	if (bestdiv % 2 != 0) {
		/* odd dividers: 3, 5, 7 map directly; 9, 15, 21 use kdiv = 3 */
		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
			*pdiv = bestdiv;
			*qdiv = 1;
			*kdiv = 1;
		} else {
			*pdiv = bestdiv / 3;
			*qdiv = 1;
			*kdiv = 3;
		}
		return;
	}

	/* even dividers */
	if (bestdiv == 2) {
		*pdiv = 2;
		*qdiv = 1;
		*kdiv = 1;
	} else if (!(bestdiv % 4)) {
		*pdiv = 2;
		*qdiv = bestdiv / 4;
		*kdiv = 2;
	} else if (!(bestdiv % 6)) {
		*pdiv = 3;
		*qdiv = bestdiv / 6;
		*kdiv = 2;
	} else if (!(bestdiv % 5)) {
		*pdiv = 5;
		*qdiv = bestdiv / 10;
		*kdiv = 2;
	} else if (!(bestdiv % 14)) {
		*pdiv = 7;
		*qdiv = bestdiv / 14;
		*kdiv = 2;
	}
}
2574 
/*
 * Fill in @params from the chosen DCO frequency and P/Q/K divider values,
 * translating the logical dividers into the register encodings and
 * splitting the DCO ratio into 15-bit integer and fraction parts.
 */
static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
				      u32 dco_freq, u32 ref_freq,
				      int pdiv, int qdiv, int kdiv)
{
	u32 dco;

	switch (kdiv) {
	case 1:
		params->kdiv = 1;
		break;
	case 2:
		params->kdiv = 2;
		break;
	case 3:
		params->kdiv = 4;
		break;
	default:
		WARN(1, "Incorrect KDiv\n");
	}

	switch (pdiv) {
	case 2:
		params->pdiv = 1;
		break;
	case 3:
		params->pdiv = 2;
		break;
	case 5:
		params->pdiv = 4;
		break;
	case 7:
		params->pdiv = 8;
		break;
	default:
		WARN(1, "Incorrect PDiv\n");
	}

	/* only the kdiv == 2 decompositions produce a non-unity qdiv */
	WARN_ON(kdiv != 2 && qdiv != 1);

	params->qdiv_ratio = qdiv;
	params->qdiv_mode = (qdiv == 1) ? 0 : 1;

	/* DCO ratio in .15 fixed point */
	dco = div_u64((u64)dco_freq << 15, ref_freq);

	params->dco_integer = dco >> 15;
	params->dco_fraction = dco & 0x7fff;
}
2622 
2623 /*
2624  * Display WA #22010492432: ehl, tgl, adl-s, adl-p
2625  * Program half of the nominal DCO divider fraction value.
2626  */
static bool
ehl_combo_pll_div_frac_wa_needed(struct intel_display *display)
{
	/* EHL from stepping B0, and all display ver 12+, with a 38.4 MHz ref only */
	return ((display->platform.elkhartlake &&
		 IS_DISPLAY_STEP(display, STEP_B0, STEP_FOREVER)) ||
		DISPLAY_VER(display) >= 12) &&
		display->dpll.ref_clks.nssc == 38400;
}
2635 
/* Pairs a port clock (kHz) with its pre-computed WRPLL parameters. */
struct icl_combo_pll_params {
	int clock;
	struct skl_wrpll_params wrpll;
};
2640 
2641 /*
2642  * These values alrea already adjusted: they're the bits we write to the
2643  * registers, not the logical values.
2644  */
2645 static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
2646 	{ 540000,
2647 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
2648 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2649 	{ 270000,
2650 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
2651 	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2652 	{ 162000,
2653 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
2654 	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2655 	{ 324000,
2656 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
2657 	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2658 	{ 216000,
2659 	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2660 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2661 	{ 432000,
2662 	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2663 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2664 	{ 648000,
2665 	  { .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
2666 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2667 	{ 810000,
2668 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
2669 	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2670 };
2671 
2672 
/*
 * 19.2 MHz reference. Also used for 38.4 MHz values, since the DPLL
 * divides a 38.4 MHz reference down to 19.2 (see icl_wrpll_ref_clock()).
 */
static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
	{ 540000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 270000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 162000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 324000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 216000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
	{ 432000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 648000,
	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 810000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};
2700 
/* ICL TBT PLL parameters for a 24 MHz reference clock. */
static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
	.dco_integer = 0x151, .dco_fraction = 0x4000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};
2705 
/* ICL TBT PLL parameters for a 19.2 (or 38.4) MHz reference clock. */
static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};
2710 
/* TGL+ TBT PLL parameters for a 19.2 (or 38.4) MHz reference clock. */
static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x54, .dco_fraction = 0x3000,
	/* the following params are unused */
	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
};
2716 
/* TGL+ TBT PLL parameters for a 24 MHz reference clock. */
static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
	.dco_integer = 0x43, .dco_fraction = 0x4000,
	/* the following params are unused (implicitly zero) */
};
2721 
/*
 * Look up the pre-computed combo PHY PLL parameters for the current DP
 * link rate. Returns 0 on success, -EINVAL for an unknown rate.
 */
static int icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
				 struct skl_wrpll_params *pll_params)
{
	struct intel_display *display = to_intel_display(crtc_state);
	const struct icl_combo_pll_params *params =
		display->dpll.ref_clks.nssc == 24000 ?
		icl_dp_combo_pll_24MHz_values :
		icl_dp_combo_pll_19_2MHz_values;
	int clock = crtc_state->port_clock;
	int i;

	/* both tables have the same number of entries, so either size works */
	for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
		if (clock == params[i].clock) {
			*pll_params = params[i].wrpll;
			return 0;
		}
	}

	MISSING_CASE(clock);
	return -EINVAL;
}
2743 
/*
 * Select the fixed TBT PLL parameters for the current reference clock,
 * using the TGL+ tables on display ver 12+ and the ICL tables otherwise.
 * Unknown reference clocks fall back to the 19.2 MHz values. Always
 * returns 0.
 */
static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
			    struct skl_wrpll_params *pll_params)
{
	struct intel_display *display = to_intel_display(crtc_state);

	if (DISPLAY_VER(display) >= 12) {
		switch (display->dpll.ref_clks.nssc) {
		default:
			MISSING_CASE(display->dpll.ref_clks.nssc);
			fallthrough;
		case 19200:
		case 38400:
			*pll_params = tgl_tbt_pll_19_2MHz_values;
			break;
		case 24000:
			*pll_params = tgl_tbt_pll_24MHz_values;
			break;
		}
	} else {
		switch (display->dpll.ref_clks.nssc) {
		default:
			MISSING_CASE(display->dpll.ref_clks.nssc);
			fallthrough;
		case 19200:
		case 38400:
			*pll_params = icl_tbt_pll_19_2MHz_values;
			break;
		case 24000:
			*pll_params = icl_tbt_pll_24MHz_values;
			break;
		}
	}

	return 0;
}
2779 
/* The TBT PLL has no single output frequency; warn if anyone asks. */
static int icl_ddi_tbt_pll_get_freq(struct intel_display *display,
				    const struct intel_dpll *pll,
				    const struct intel_dpll_hw_state *dpll_hw_state)
{
	/*
	 * The PLL outputs multiple frequencies at the same time, selection is
	 * made at DDI clock mux level.
	 */
	drm_WARN_ON(display->drm, 1);

	return 0;
}
2792 
2793 static int icl_wrpll_ref_clock(struct intel_display *display)
2794 {
2795 	int ref_clock = display->dpll.ref_clks.nssc;
2796 
2797 	/*
2798 	 * For ICL+, the spec states: if reference frequency is 38.4,
2799 	 * use 19.2 because the DPLL automatically divides that by 2.
2800 	 */
2801 	if (ref_clock == 38400)
2802 		ref_clock = 19200;
2803 
2804 	return ref_clock;
2805 }
2806 
/*
 * Compute WRPLL parameters for the current port clock.
 *
 * Searches the fixed divider table for the candidate whose DCO frequency
 * (afe_clock * divider) lies within [dco_min, dco_max] and is closest to
 * the band's midpoint, then decomposes that divider into P/Q/K and fills
 * in @wrpll_params. Returns 0 on success, -EINVAL if no divider yields a
 * DCO in range.
 */
static int
icl_calc_wrpll(struct intel_crtc_state *crtc_state,
	       struct skl_wrpll_params *wrpll_params)
{
	struct intel_display *display = to_intel_display(crtc_state);
	int ref_clock = icl_wrpll_ref_clock(display);
	u32 afe_clock = crtc_state->port_clock * 5;
	u32 dco_min = 7998000;
	u32 dco_max = 10000000;
	u32 dco_mid = (dco_min + dco_max) / 2;
	/* even dividers first, then the valid odd ones */
	static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
					 18, 20, 24, 28, 30, 32,  36,  40,
					 42, 44, 48, 50, 52, 54,  56,  60,
					 64, 66, 68, 70, 72, 76,  78,  80,
					 84, 88, 90, 92, 96, 98, 100, 102,
					  3,  5,  7,  9, 15, 21 };
	u32 dco, best_dco = 0, dco_centrality = 0;
	u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;

	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
		dco = afe_clock * dividers[d];

		if (dco <= dco_max && dco >= dco_min) {
			dco_centrality = abs(dco - dco_mid);

			if (dco_centrality < best_dco_centrality) {
				best_dco_centrality = dco_centrality;
				best_div = dividers[d];
				best_dco = dco;
			}
		}
	}

	if (best_div == 0)
		return -EINVAL;

	icl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
	icl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
				  pdiv, qdiv, kdiv);

	return 0;
}
2850 
/*
 * Recover the combo PHY PLL output frequency (in kHz) from the CFGCR
 * register state by decoding the P0/P1/P2 dividers and the DCO ratio.
 * Returns 0 (with a WARN) if any decoded divider is zero.
 */
static int icl_ddi_combo_pll_get_freq(struct intel_display *display,
				      const struct intel_dpll *pll,
				      const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
	int ref_clock = icl_wrpll_ref_clock(display);
	u32 dco_fraction;
	u32 p0, p1, p2, dco_freq;

	p0 = hw_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
	p2 = hw_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;

	/* Q divider only applies when QDIV_MODE is set */
	if (hw_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
		p1 = (hw_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
			DPLL_CFGCR1_QDIV_RATIO_SHIFT;
	else
		p1 = 1;

	switch (p0) {
	case DPLL_CFGCR1_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR1_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR1_PDIV_5:
		p0 = 5;
		break;
	case DPLL_CFGCR1_PDIV_7:
		p0 = 7;
		break;
	}

	switch (p2) {
	case DPLL_CFGCR1_KDIV_1:
		p2 = 1;
		break;
	case DPLL_CFGCR1_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR1_KDIV_3:
		p2 = 3;
		break;
	}

	dco_freq = (hw_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
		   ref_clock;

	dco_fraction = (hw_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
		       DPLL_CFGCR0_DCO_FRACTION_SHIFT;

	/* undo the halved fraction programmed by the WA (see icl_calc_dpll_state()) */
	if (ehl_combo_pll_div_frac_wa_needed(display))
		dco_fraction *= 2;

	dco_freq += (dco_fraction * ref_clock) / 0x8000;

	if (drm_WARN_ON(display->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	return dco_freq / (p0 * p1 * p2 * 5);
}
2912 
/*
 * Translate the computed WRPLL parameters into CFGCR0/CFGCR1 (and,
 * when the VBT overrides AFC startup, DIV0) register values.
 */
static void icl_calc_dpll_state(struct intel_display *display,
				const struct skl_wrpll_params *pll_params,
				struct intel_dpll_hw_state *dpll_hw_state)
{
	struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
	u32 dco_fraction = pll_params->dco_fraction;

	/* Display WA #22010492432: program half the nominal DCO fraction */
	if (ehl_combo_pll_div_frac_wa_needed(display))
		dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);

	hw_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
			    pll_params->dco_integer;

	hw_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
			    DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
			    DPLL_CFGCR1_KDIV(pll_params->kdiv) |
			    DPLL_CFGCR1_PDIV(pll_params->pdiv);

	if (DISPLAY_VER(display) >= 12)
		hw_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
	else
		hw_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;

	if (display->vbt.override_afc_startup)
		hw_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(display->vbt.override_afc_startup_val);
}
2939 
/*
 * Find MG/DKL PHY PLL divider values for @clock_khz.
 *
 * Iterates div1 (7, 5, 3, 2) and div2 (10..1) looking for the first pair
 * whose DCO (div1 * div2 * clock * 5, in kHz) falls within the allowed
 * range, then fills in the refclkin/clktop2 fields of @hw_state and
 * returns the DCO via @target_dco_khz. Returns 0 on success, -EINVAL if
 * no divider pair gives a DCO in range.
 */
static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
				    u32 *target_dco_khz,
				    struct icl_dpll_hw_state *hw_state,
				    bool is_dkl)
{
	static const u8 div1_vals[] = { 7, 5, 3, 2 };
	u32 dco_min_freq, dco_max_freq;
	unsigned int i;
	int div2;

	/* DP pins the DCO at exactly 8.1 GHz; HDMI allows a range */
	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
	dco_max_freq = is_dp ? 8100000 : 10000000;

	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
		int div1 = div1_vals[i];

		for (div2 = 10; div2 > 0; div2--) {
			int dco = div1 * div2 * clock_khz * 5;
			int a_divratio, tlinedrv, inputsel;
			u32 hsdiv;

			if (dco < dco_min_freq || dco > dco_max_freq)
				continue;

			if (div2 >= 2) {
				/*
				 * Note: a_divratio not matching TGL BSpec
				 * algorithm but matching hardcoded values and
				 * working on HW for DP alt-mode at least
				 */
				a_divratio = is_dp ? 10 : 5;
				tlinedrv = is_dkl ? 1 : 2;
			} else {
				a_divratio = 5;
				tlinedrv = 0;
			}
			inputsel = is_dp ? 0 : 1;

			switch (div1) {
			default:
				MISSING_CASE(div1);
				fallthrough;
			case 2:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
				break;
			case 3:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
				break;
			case 5:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
				break;
			case 7:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
				break;
			}

			*target_dco_khz = dco;

			hw_state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);

			hw_state->mg_clktop2_coreclkctl1 =
				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);

			hw_state->mg_clktop2_hsclkctl =
				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
				hsdiv |
				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);

			return 0;
		}
	}

	return -EINVAL;
}
3015 
3016 /*
3017  * The specification for this function uses real numbers, so the math had to be
3018  * adapted to integer-only calculation, that's why it looks so different.
3019  */
static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
				 struct intel_dpll_hw_state *dpll_hw_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
	/* MG/DKL PHY PLL calculations use the non-SSC reference clock. */
	int refclk_khz = display->dpll.ref_clks.nssc;
	int clock = crtc_state->port_clock;
	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
	u32 iref_ndiv, iref_trim, iref_pulse_w;
	u32 prop_coeff, int_coeff;
	u32 tdc_targetcnt, feedfwgain;
	u64 ssc_stepsize, ssc_steplen, ssc_steplog;
	u64 tmp;
	bool use_ssc = false;
	/* Anything that is not HDMI is treated as DP here. */
	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
	/* Display ver 12+ uses the Dekel (DKL) PHY instead of the MG PHY. */
	bool is_dkl = DISPLAY_VER(display) >= 12;
	int ret;

	/* Choose HSDIV/DSDIV and the resulting target DCO frequency. */
	ret = icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
				       hw_state, is_dkl);
	if (ret)
		return ret;

	/*
	 * Feedback pre-divider (m1) and integer feedback divider (m2).
	 * The integer divider must not exceed 255; pre-DKL hardware may
	 * retry with m1 = 4 to bring it back into range.
	 */
	m1div = 2;
	m2div_int = dco_khz / (refclk_khz * m1div);
	if (m2div_int > 255) {
		if (!is_dkl) {
			m1div = 4;
			m2div_int = dco_khz / (refclk_khz * m1div);
		}

		if (m2div_int > 255)
			return -EINVAL;
	}
	m2div_rem = dco_khz % (refclk_khz * m1div);

	/* Fractional part of the feedback divider, in 1/2^22 units. */
	tmp = (u64)m2div_rem * (1 << 22);
	do_div(tmp, refclk_khz * m1div);
	m2div_frac = tmp;

	/* Reference-clock-dependent iref divider/trim/pulse-width values. */
	switch (refclk_khz) {
	case 19200:
		iref_ndiv = 1;
		iref_trim = 28;
		iref_pulse_w = 1;
		break;
	case 24000:
		iref_ndiv = 1;
		iref_trim = 25;
		iref_pulse_w = 2;
		break;
	case 38400:
		iref_ndiv = 2;
		iref_trim = 28;
		iref_pulse_w = 1;
		break;
	default:
		MISSING_CASE(refclk_khz);
		return -EINVAL;
	}

	/*
	 * tdc_res = 0.000003
	 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
	 *
	 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
	 * was supposed to be a division, but we rearranged the operations of
	 * the formula to avoid early divisions so we don't multiply the
	 * rounding errors.
	 *
	 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
	 * we also rearrange to work with integers.
	 *
	 * The 0.5 transformed to 5 results in a multiplication by 10 and the
	 * last division by 10.
	 */
	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;

	/*
	 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
	 * 32 bits. That's not a problem since we round the division down
	 * anyway.
	 */
	feedfwgain = (use_ssc || m2div_rem > 0) ?
		m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;

	/* Loop-filter coefficients depend on the DCO frequency range. */
	if (dco_khz >= 9000000) {
		prop_coeff = 5;
		int_coeff = 10;
	} else {
		prop_coeff = 4;
		int_coeff = 8;
	}

	if (use_ssc) {
		tmp = mul_u32_u32(dco_khz, 47 * 32);
		do_div(tmp, refclk_khz * m1div * 10000);
		ssc_stepsize = tmp;

		tmp = mul_u32_u32(dco_khz, 1000);
		ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
	} else {
		ssc_stepsize = 0;
		ssc_steplen = 0;
	}
	ssc_steplog = 4;

	/* write pll_state calculations */
	if (is_dkl) {
		hw_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
					 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
					 DKL_PLL_DIV0_FBPREDIV(m1div) |
					 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
		/* Optional AFC startup override from the VBT. */
		if (display->vbt.override_afc_startup) {
			u8 val = display->vbt.override_afc_startup_val;

			hw_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val);
		}

		hw_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
					 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);

		hw_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
					DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
					DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
					(use_ssc ? DKL_PLL_SSC_EN : 0);

		hw_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
					  DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);

		hw_state->mg_pll_tdc_coldst_bias =
				DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
				DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);

	} else {
		hw_state->mg_pll_div0 =
			(m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
			MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
			MG_PLL_DIV0_FBDIV_INT(m2div_int);

		hw_state->mg_pll_div1 =
			MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
			MG_PLL_DIV1_DITHER_DIV_2 |
			MG_PLL_DIV1_NDIVRATIO(1) |
			MG_PLL_DIV1_FBPREDIV(m1div);

		hw_state->mg_pll_lf =
			MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
			MG_PLL_LF_AFCCNTSEL_512 |
			MG_PLL_LF_GAINCTRL(1) |
			MG_PLL_LF_INT_COEFF(int_coeff) |
			MG_PLL_LF_PROP_COEFF(prop_coeff);

		hw_state->mg_pll_frac_lock =
			MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
			MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
			MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
			MG_PLL_FRAC_LOCK_DCODITHEREN |
			MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
		if (use_ssc || m2div_rem > 0)
			hw_state->mg_pll_frac_lock |=
				MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;

		hw_state->mg_pll_ssc =
			(use_ssc ? MG_PLL_SSC_EN : 0) |
			MG_PLL_SSC_TYPE(2) |
			MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
			MG_PLL_SSC_STEPNUM(ssc_steplog) |
			MG_PLL_SSC_FLLEN |
			MG_PLL_SSC_STEPSIZE(ssc_stepsize);

		hw_state->mg_pll_tdc_coldst_bias =
			MG_PLL_TDC_COLDST_COLDSTART |
			MG_PLL_TDC_COLDST_IREFINT_EN |
			MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
			MG_PLL_TDC_TDCOVCCORR_EN |
			MG_PLL_TDC_TDCSEL(3);

		hw_state->mg_pll_bias =
			MG_PLL_BIAS_BIAS_GB_SEL(3) |
			MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
			MG_PLL_BIAS_BIAS_BONUS(10) |
			MG_PLL_BIAS_BIASCAL_EN |
			MG_PLL_BIAS_CTRIM(12) |
			MG_PLL_BIAS_VREF_RDAC(4) |
			MG_PLL_BIAS_IREFTRIM(iref_trim);

		/*
		 * These masks are also used as RMW masks when programming
		 * MG_PLL_BIAS / MG_PLL_TDC_COLDST_BIAS (see icl_mg_pll_write());
		 * with a 38.4 MHz refclk only the coldstart bit is written.
		 */
		if (refclk_khz == 38400) {
			hw_state->mg_pll_tdc_coldst_bias_mask =
				MG_PLL_TDC_COLDST_COLDSTART;
			hw_state->mg_pll_bias_mask = 0;
		} else {
			hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
			hw_state->mg_pll_bias_mask = -1U;
		}

		hw_state->mg_pll_tdc_coldst_bias &=
			hw_state->mg_pll_tdc_coldst_bias_mask;
		hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
	}

	return 0;
}
3223 
3224 static int icl_ddi_mg_pll_get_freq(struct intel_display *display,
3225 				   const struct intel_dpll *pll,
3226 				   const struct intel_dpll_hw_state *dpll_hw_state)
3227 {
3228 	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3229 	u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
3230 	u64 tmp;
3231 
3232 	ref_clock = display->dpll.ref_clks.nssc;
3233 
3234 	if (DISPLAY_VER(display) >= 12) {
3235 		m1 = hw_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
3236 		m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
3237 		m2_int = hw_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
3238 
3239 		if (hw_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
3240 			m2_frac = hw_state->mg_pll_bias &
3241 				  DKL_PLL_BIAS_FBDIV_FRAC_MASK;
3242 			m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
3243 		} else {
3244 			m2_frac = 0;
3245 		}
3246 	} else {
3247 		m1 = hw_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
3248 		m2_int = hw_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
3249 
3250 		if (hw_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
3251 			m2_frac = hw_state->mg_pll_div0 &
3252 				  MG_PLL_DIV0_FBDIV_FRAC_MASK;
3253 			m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
3254 		} else {
3255 			m2_frac = 0;
3256 		}
3257 	}
3258 
3259 	switch (hw_state->mg_clktop2_hsclkctl &
3260 		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
3261 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
3262 		div1 = 2;
3263 		break;
3264 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
3265 		div1 = 3;
3266 		break;
3267 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
3268 		div1 = 5;
3269 		break;
3270 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
3271 		div1 = 7;
3272 		break;
3273 	default:
3274 		MISSING_CASE(hw_state->mg_clktop2_hsclkctl);
3275 		return 0;
3276 	}
3277 
3278 	div2 = (hw_state->mg_clktop2_hsclkctl &
3279 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
3280 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;
3281 
3282 	/* div2 value of 0 is same as 1 means no div */
3283 	if (div2 == 0)
3284 		div2 = 1;
3285 
3286 	/*
3287 	 * Adjust the original formula to delay the division by 2^22 in order to
3288 	 * minimize possible rounding errors.
3289 	 */
3290 	tmp = (u64)m1 * m2_int * ref_clock +
3291 	      (((u64)m1 * m2_frac * ref_clock) >> 22);
3292 	tmp = div_u64(tmp, 5 * div1 * div2);
3293 
3294 	return tmp;
3295 }
3296 
3297 /**
3298  * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
3299  * @crtc_state: state for the CRTC to select the DPLL for
3300  * @port_dpll_id: the active @port_dpll_id to select
3301  *
3302  * Select the given @port_dpll_id instance from the DPLLs reserved for the
3303  * CRTC.
3304  */
3305 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
3306 			      enum icl_port_dpll_id port_dpll_id)
3307 {
3308 	struct icl_port_dpll *port_dpll =
3309 		&crtc_state->icl_port_dplls[port_dpll_id];
3310 
3311 	crtc_state->intel_dpll = port_dpll->pll;
3312 	crtc_state->dpll_hw_state = port_dpll->hw_state;
3313 }
3314 
3315 static void icl_update_active_dpll(struct intel_atomic_state *state,
3316 				   struct intel_crtc *crtc,
3317 				   struct intel_encoder *encoder)
3318 {
3319 	struct intel_crtc_state *crtc_state =
3320 		intel_atomic_get_new_crtc_state(state, crtc);
3321 	struct intel_digital_port *primary_port;
3322 	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3323 
3324 	primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3325 		enc_to_mst(encoder)->primary :
3326 		enc_to_dig_port(encoder);
3327 
3328 	if (primary_port &&
3329 	    (intel_tc_port_in_dp_alt_mode(primary_port) ||
3330 	     intel_tc_port_in_legacy_mode(primary_port)))
3331 		port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3332 
3333 	icl_set_active_port_dpll(crtc_state, port_dpll_id);
3334 }
3335 
3336 static int icl_compute_combo_phy_dpll(struct intel_atomic_state *state,
3337 				      struct intel_crtc *crtc)
3338 {
3339 	struct intel_display *display = to_intel_display(state);
3340 	struct intel_crtc_state *crtc_state =
3341 		intel_atomic_get_new_crtc_state(state, crtc);
3342 	struct icl_port_dpll *port_dpll =
3343 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3344 	struct skl_wrpll_params pll_params = {};
3345 	int ret;
3346 
3347 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
3348 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
3349 		ret = icl_calc_wrpll(crtc_state, &pll_params);
3350 	else
3351 		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
3352 
3353 	if (ret)
3354 		return ret;
3355 
3356 	icl_calc_dpll_state(display, &pll_params, &port_dpll->hw_state);
3357 
3358 	/* this is mainly for the fastset check */
3359 	icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
3360 
3361 	crtc_state->port_clock = icl_ddi_combo_pll_get_freq(display, NULL,
3362 							    &port_dpll->hw_state);
3363 
3364 	return 0;
3365 }
3366 
3367 static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
3368 				  struct intel_crtc *crtc,
3369 				  struct intel_encoder *encoder)
3370 {
3371 	struct intel_display *display = to_intel_display(crtc);
3372 	struct intel_crtc_state *crtc_state =
3373 		intel_atomic_get_new_crtc_state(state, crtc);
3374 	struct icl_port_dpll *port_dpll =
3375 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3376 	enum port port = encoder->port;
3377 	unsigned long dpll_mask;
3378 
3379 	if (display->platform.alderlake_s) {
3380 		dpll_mask =
3381 			BIT(DPLL_ID_DG1_DPLL3) |
3382 			BIT(DPLL_ID_DG1_DPLL2) |
3383 			BIT(DPLL_ID_ICL_DPLL1) |
3384 			BIT(DPLL_ID_ICL_DPLL0);
3385 	} else if (display->platform.dg1) {
3386 		if (port == PORT_D || port == PORT_E) {
3387 			dpll_mask =
3388 				BIT(DPLL_ID_DG1_DPLL2) |
3389 				BIT(DPLL_ID_DG1_DPLL3);
3390 		} else {
3391 			dpll_mask =
3392 				BIT(DPLL_ID_DG1_DPLL0) |
3393 				BIT(DPLL_ID_DG1_DPLL1);
3394 		}
3395 	} else if (display->platform.rocketlake) {
3396 		dpll_mask =
3397 			BIT(DPLL_ID_EHL_DPLL4) |
3398 			BIT(DPLL_ID_ICL_DPLL1) |
3399 			BIT(DPLL_ID_ICL_DPLL0);
3400 	} else if ((display->platform.jasperlake ||
3401 		    display->platform.elkhartlake) &&
3402 		   port != PORT_A) {
3403 		dpll_mask =
3404 			BIT(DPLL_ID_EHL_DPLL4) |
3405 			BIT(DPLL_ID_ICL_DPLL1) |
3406 			BIT(DPLL_ID_ICL_DPLL0);
3407 	} else {
3408 		dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
3409 	}
3410 
3411 	/* Eliminate DPLLs from consideration if reserved by HTI */
3412 	dpll_mask &= ~intel_hti_dpll_mask(display);
3413 
3414 	port_dpll->pll = intel_find_dpll(state, crtc,
3415 					 &port_dpll->hw_state,
3416 					 dpll_mask);
3417 	if (!port_dpll->pll)
3418 		return -EINVAL;
3419 
3420 	intel_reference_dpll(state, crtc,
3421 			     port_dpll->pll, &port_dpll->hw_state);
3422 
3423 	icl_update_active_dpll(state, crtc, encoder);
3424 
3425 	return 0;
3426 }
3427 
3428 static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
3429 				    struct intel_crtc *crtc)
3430 {
3431 	struct intel_display *display = to_intel_display(state);
3432 	struct intel_crtc_state *crtc_state =
3433 		intel_atomic_get_new_crtc_state(state, crtc);
3434 	const struct intel_crtc_state *old_crtc_state =
3435 		intel_atomic_get_old_crtc_state(state, crtc);
3436 	struct icl_port_dpll *port_dpll =
3437 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3438 	struct skl_wrpll_params pll_params = {};
3439 	int ret;
3440 
3441 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3442 	ret = icl_calc_tbt_pll(crtc_state, &pll_params);
3443 	if (ret)
3444 		return ret;
3445 
3446 	icl_calc_dpll_state(display, &pll_params, &port_dpll->hw_state);
3447 
3448 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3449 	ret = icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state);
3450 	if (ret)
3451 		return ret;
3452 
3453 	/* this is mainly for the fastset check */
3454 	if (old_crtc_state->intel_dpll &&
3455 	    old_crtc_state->intel_dpll->info->id == DPLL_ID_ICL_TBTPLL)
3456 		icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
3457 	else
3458 		icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);
3459 
3460 	crtc_state->port_clock = icl_ddi_mg_pll_get_freq(display, NULL,
3461 							 &port_dpll->hw_state);
3462 
3463 	return 0;
3464 }
3465 
3466 static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
3467 				struct intel_crtc *crtc,
3468 				struct intel_encoder *encoder)
3469 {
3470 	struct intel_crtc_state *crtc_state =
3471 		intel_atomic_get_new_crtc_state(state, crtc);
3472 	struct icl_port_dpll *port_dpll =
3473 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3474 	enum intel_dpll_id dpll_id;
3475 	int ret;
3476 
3477 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3478 	port_dpll->pll = intel_find_dpll(state, crtc,
3479 					 &port_dpll->hw_state,
3480 					 BIT(DPLL_ID_ICL_TBTPLL));
3481 	if (!port_dpll->pll)
3482 		return -EINVAL;
3483 	intel_reference_dpll(state, crtc,
3484 			     port_dpll->pll, &port_dpll->hw_state);
3485 
3486 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3487 	dpll_id = icl_tc_port_to_pll_id(intel_encoder_to_tc(encoder));
3488 	port_dpll->pll = intel_find_dpll(state, crtc,
3489 					 &port_dpll->hw_state,
3490 					 BIT(dpll_id));
3491 	if (!port_dpll->pll) {
3492 		ret = -EINVAL;
3493 		goto err_unreference_tbt_pll;
3494 	}
3495 	intel_reference_dpll(state, crtc,
3496 			     port_dpll->pll, &port_dpll->hw_state);
3497 
3498 	icl_update_active_dpll(state, crtc, encoder);
3499 
3500 	return 0;
3501 
3502 err_unreference_tbt_pll:
3503 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3504 	intel_unreference_dpll(state, crtc, port_dpll->pll);
3505 
3506 	return ret;
3507 }
3508 
3509 /*
3510  * Get the PLL for either a port using a C10 PHY PLL, or for a port using a
3511  * C20 PHY PLL in the cases of:
3512  * - BMG port A/B
3513  * - PTL port B eDP over TypeC PHY
3514  */
3515 static int mtl_get_non_tc_phy_dpll(struct intel_atomic_state *state,
3516 				      struct intel_crtc *crtc,
3517 				      struct intel_encoder *encoder)
3518 {
3519 	struct intel_display *display = to_intel_display(crtc);
3520 	struct intel_crtc_state *crtc_state =
3521 		intel_atomic_get_new_crtc_state(state, crtc);
3522 	struct icl_port_dpll *port_dpll =
3523 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3524 	enum intel_dpll_id pll_id = mtl_port_to_pll_id(display, encoder->port);
3525 
3526 	port_dpll->pll = intel_find_dpll(state, crtc,
3527 					 &port_dpll->hw_state,
3528 					 BIT(pll_id));
3529 	if (!port_dpll->pll)
3530 		return -EINVAL;
3531 
3532 	intel_reference_dpll(state, crtc,
3533 			     port_dpll->pll, &port_dpll->hw_state);
3534 
3535 	icl_update_active_dpll(state, crtc, encoder);
3536 
3537 	return 0;
3538 }
3539 
3540 static int icl_compute_dplls(struct intel_atomic_state *state,
3541 			     struct intel_crtc *crtc,
3542 			     struct intel_encoder *encoder)
3543 {
3544 	if (intel_encoder_is_combo(encoder))
3545 		return icl_compute_combo_phy_dpll(state, crtc);
3546 	else if (intel_encoder_is_tc(encoder))
3547 		return icl_compute_tc_phy_dplls(state, crtc);
3548 
3549 	MISSING_CASE(encoder->port);
3550 
3551 	return 0;
3552 }
3553 
3554 static int icl_get_dplls(struct intel_atomic_state *state,
3555 			 struct intel_crtc *crtc,
3556 			 struct intel_encoder *encoder)
3557 {
3558 	if (intel_encoder_is_combo(encoder))
3559 		return icl_get_combo_phy_dpll(state, crtc, encoder);
3560 	else if (intel_encoder_is_tc(encoder))
3561 		return icl_get_tc_phy_dplls(state, crtc, encoder);
3562 
3563 	MISSING_CASE(encoder->port);
3564 
3565 	return -EINVAL;
3566 }
3567 
3568 static void icl_put_dplls(struct intel_atomic_state *state,
3569 			  struct intel_crtc *crtc)
3570 {
3571 	const struct intel_crtc_state *old_crtc_state =
3572 		intel_atomic_get_old_crtc_state(state, crtc);
3573 	struct intel_crtc_state *new_crtc_state =
3574 		intel_atomic_get_new_crtc_state(state, crtc);
3575 	enum icl_port_dpll_id id;
3576 
3577 	new_crtc_state->intel_dpll = NULL;
3578 
3579 	for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3580 		const struct icl_port_dpll *old_port_dpll =
3581 			&old_crtc_state->icl_port_dplls[id];
3582 		struct icl_port_dpll *new_port_dpll =
3583 			&new_crtc_state->icl_port_dplls[id];
3584 
3585 		new_port_dpll->pll = NULL;
3586 
3587 		if (!old_port_dpll->pll)
3588 			continue;
3589 
3590 		intel_unreference_dpll(state, crtc, old_port_dpll->pll);
3591 	}
3592 }
3593 
/*
 * Read the MG PHY PLL state into @dpll_hw_state. Returns true iff the
 * display core power domain could be acquired and the PLL is enabled.
 */
static bool mg_pll_get_hw_state(struct intel_display *display,
				struct intel_dpll *pll,
				struct intel_dpll_hw_state *dpll_hw_state)
{
	struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	struct ref_tracker *wakeref;
	bool ret = false;
	u32 val;

	i915_reg_t enable_reg = intel_tc_pll_enable_reg(display, pll);

	wakeref = intel_display_power_get_if_enabled(display,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(display, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	/* Mask each register value down to the bits this driver programs. */
	hw_state->mg_refclkin_ctl = intel_de_read(display,
						  MG_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_de_read(display, MG_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_de_read(display, MG_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_pll_div0 = intel_de_read(display, MG_PLL_DIV0(tc_port));
	hw_state->mg_pll_div1 = intel_de_read(display, MG_PLL_DIV1(tc_port));
	hw_state->mg_pll_lf = intel_de_read(display, MG_PLL_LF(tc_port));
	hw_state->mg_pll_frac_lock = intel_de_read(display,
						   MG_PLL_FRAC_LOCK(tc_port));
	hw_state->mg_pll_ssc = intel_de_read(display, MG_PLL_SSC(tc_port));

	hw_state->mg_pll_bias = intel_de_read(display, MG_PLL_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias =
		intel_de_read(display, MG_PLL_TDC_COLDST_BIAS(tc_port));

	/*
	 * Same refclk-dependent masking as in icl_calc_mg_pll_state(): with a
	 * 38.4 MHz refclk only the coldstart bit of the TDC register and none
	 * of the bias register is considered.
	 */
	if (display->dpll.ref_clks.nssc == 38400) {
		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
		hw_state->mg_pll_bias_mask = 0;
	} else {
		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
		hw_state->mg_pll_bias_mask = -1U;
	}

	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;

	ret = true;
out:
	intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3660 
/*
 * Read the Dekel (DKL) PHY PLL state into @dpll_hw_state. Returns true iff
 * the display core power domain could be acquired and the PLL is enabled.
 */
static bool dkl_pll_get_hw_state(struct intel_display *display,
				 struct intel_dpll *pll,
				 struct intel_dpll_hw_state *dpll_hw_state)
{
	struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	struct ref_tracker *wakeref;
	bool ret = false;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(display,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(display, intel_tc_pll_enable_reg(display, pll));
	if (!(val & PLL_ENABLE))
		goto out;

	/*
	 * All registers read here have the same HIP_INDEX_REG even though
	 * they are on different building blocks
	 */
	/* Mask each register value down to the bits this driver programs. */
	hw_state->mg_refclkin_ctl = intel_dkl_phy_read(display,
						       DKL_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_dkl_phy_read(display, DKL_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_dkl_phy_read(display, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_pll_div0 = intel_dkl_phy_read(display, DKL_PLL_DIV0(tc_port));
	/* Keep the AFC startup bits only when the VBT overrides them. */
	val = DKL_PLL_DIV0_MASK;
	if (display->vbt.override_afc_startup)
		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
	hw_state->mg_pll_div0 &= val;

	hw_state->mg_pll_div1 = intel_dkl_phy_read(display, DKL_PLL_DIV1(tc_port));
	hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
				  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);

	hw_state->mg_pll_ssc = intel_dkl_phy_read(display, DKL_PLL_SSC(tc_port));
	hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
				 DKL_PLL_SSC_STEP_LEN_MASK |
				 DKL_PLL_SSC_STEP_NUM_MASK |
				 DKL_PLL_SSC_EN);

	hw_state->mg_pll_bias = intel_dkl_phy_read(display, DKL_PLL_BIAS(tc_port));
	hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK);

	hw_state->mg_pll_tdc_coldst_bias =
		intel_dkl_phy_read(display, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
					     DKL_PLL_TDC_FEED_FWD_GAIN_MASK);

	ret = true;
out:
	intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3732 
/*
 * Read the combo/TBT DPLL configuration (CFGCR0/CFGCR1, plus DIV0 on TGL+
 * when the VBT overrides the AFC startup value) from the platform-specific
 * register location. Returns true iff the display core power domain could
 * be acquired and the PLL (per @enable_reg) is enabled.
 */
static bool icl_pll_get_hw_state(struct intel_display *display,
				 struct intel_dpll *pll,
				 struct intel_dpll_hw_state *dpll_hw_state,
				 i915_reg_t enable_reg)
{
	struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
	const enum intel_dpll_id id = pll->info->id;
	struct ref_tracker *wakeref;
	bool ret = false;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(display,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(display, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	/* CFGCR register locations differ per platform. */
	if (display->platform.alderlake_s) {
		hw_state->cfgcr0 = intel_de_read(display, ADLS_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(display, ADLS_DPLL_CFGCR1(id));
	} else if (display->platform.dg1) {
		hw_state->cfgcr0 = intel_de_read(display, DG1_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(display, DG1_DPLL_CFGCR1(id));
	} else if (display->platform.rocketlake) {
		hw_state->cfgcr0 = intel_de_read(display,
						 RKL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(display,
						 RKL_DPLL_CFGCR1(id));
	} else if (DISPLAY_VER(display) >= 12) {
		hw_state->cfgcr0 = intel_de_read(display,
						 TGL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(display,
						 TGL_DPLL_CFGCR1(id));
		/* DIV0 is only read out when the VBT overrides AFC startup. */
		if (display->vbt.override_afc_startup) {
			hw_state->div0 = intel_de_read(display, TGL_DPLL0_DIV0(id));
			hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK;
		}
	} else {
		/* JSL/EHL DPLL4 uses the CFGCR registers at index 4. */
		if ((display->platform.jasperlake || display->platform.elkhartlake) &&
		    id == DPLL_ID_EHL_DPLL4) {
			hw_state->cfgcr0 = intel_de_read(display,
							 ICL_DPLL_CFGCR0(4));
			hw_state->cfgcr1 = intel_de_read(display,
							 ICL_DPLL_CFGCR1(4));
		} else {
			hw_state->cfgcr0 = intel_de_read(display,
							 ICL_DPLL_CFGCR0(id));
			hw_state->cfgcr1 = intel_de_read(display,
							 ICL_DPLL_CFGCR1(id));
		}
	}

	ret = true;
out:
	intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3793 
3794 static bool combo_pll_get_hw_state(struct intel_display *display,
3795 				   struct intel_dpll *pll,
3796 				   struct intel_dpll_hw_state *dpll_hw_state)
3797 {
3798 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(display, pll);
3799 
3800 	return icl_pll_get_hw_state(display, pll, dpll_hw_state, enable_reg);
3801 }
3802 
3803 static bool icl_tbt_pll_get_hw_state(struct intel_display *display,
3804 				     struct intel_dpll *pll,
3805 				     struct intel_dpll_hw_state *dpll_hw_state)
3806 {
3807 	return icl_pll_get_hw_state(display, pll, dpll_hw_state, TBT_PLL_ENABLE);
3808 }
3809 
/*
 * Program the combo/TBT DPLL configuration registers (CFGCR0/CFGCR1, and
 * DIV0 on TGL+) from @hw_state. The register locations vary per platform.
 */
static void icl_dpll_write(struct intel_display *display,
			   struct intel_dpll *pll,
			   const struct icl_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	i915_reg_t cfgcr0_reg, cfgcr1_reg, div0_reg = INVALID_MMIO_REG;

	if (display->platform.alderlake_s) {
		cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
		cfgcr1_reg = ADLS_DPLL_CFGCR1(id);
	} else if (display->platform.dg1) {
		cfgcr0_reg = DG1_DPLL_CFGCR0(id);
		cfgcr1_reg = DG1_DPLL_CFGCR1(id);
	} else if (display->platform.rocketlake) {
		cfgcr0_reg = RKL_DPLL_CFGCR0(id);
		cfgcr1_reg = RKL_DPLL_CFGCR1(id);
	} else if (DISPLAY_VER(display) >= 12) {
		cfgcr0_reg = TGL_DPLL_CFGCR0(id);
		cfgcr1_reg = TGL_DPLL_CFGCR1(id);
		/* Only TGL+ has a DIV0 register for the AFC startup override. */
		div0_reg = TGL_DPLL0_DIV0(id);
	} else {
		/* JSL/EHL DPLL4 uses the CFGCR registers at index 4. */
		if ((display->platform.jasperlake || display->platform.elkhartlake) &&
		    id == DPLL_ID_EHL_DPLL4) {
			cfgcr0_reg = ICL_DPLL_CFGCR0(4);
			cfgcr1_reg = ICL_DPLL_CFGCR1(4);
		} else {
			cfgcr0_reg = ICL_DPLL_CFGCR0(id);
			cfgcr1_reg = ICL_DPLL_CFGCR1(id);
		}
	}

	intel_de_write(display, cfgcr0_reg, hw_state->cfgcr0);
	intel_de_write(display, cfgcr1_reg, hw_state->cfgcr1);
	/*
	 * The VBT AFC startup override can only be honored when a DIV0
	 * register exists for this platform; warn otherwise.
	 */
	drm_WARN_ON_ONCE(display->drm, display->vbt.override_afc_startup &&
			 !i915_mmio_reg_valid(div0_reg));
	if (display->vbt.override_afc_startup &&
	    i915_mmio_reg_valid(div0_reg))
		intel_de_rmw(display, div0_reg,
			     TGL_DPLL0_DIV0_AFC_STARTUP_MASK, hw_state->div0);
	/* Posting read to flush the preceding register writes. */
	intel_de_posting_read(display, cfgcr1_reg);
}
3851 
/* Program the MG PHY PLL registers for @pll from @hw_state. */
static void icl_mg_pll_write(struct intel_display *display,
			     struct intel_dpll *pll,
			     const struct icl_dpll_hw_state *hw_state)
{
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);

	/*
	 * Some of the following registers have reserved fields, so program
	 * these with RMW based on a mask. The mask can be fixed or generated
	 * during the calc/readout phase if the mask depends on some other HW
	 * state like refclk, see icl_calc_mg_pll_state().
	 */
	intel_de_rmw(display, MG_REFCLKIN_CTL(tc_port),
		     MG_REFCLKIN_CTL_OD_2_MUX_MASK, hw_state->mg_refclkin_ctl);

	intel_de_rmw(display, MG_CLKTOP2_CORECLKCTL1(tc_port),
		     MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK,
		     hw_state->mg_clktop2_coreclkctl1);

	intel_de_rmw(display, MG_CLKTOP2_HSCLKCTL(tc_port),
		     MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		     MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		     MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		     MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK,
		     hw_state->mg_clktop2_hsclkctl);

	/* These registers are fully owned by the driver; plain writes. */
	intel_de_write(display, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
	intel_de_write(display, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
	intel_de_write(display, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
	intel_de_write(display, MG_PLL_FRAC_LOCK(tc_port),
		       hw_state->mg_pll_frac_lock);
	intel_de_write(display, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);

	/* Bias/TDC masks are refclk dependent, see icl_calc_mg_pll_state(). */
	intel_de_rmw(display, MG_PLL_BIAS(tc_port),
		     hw_state->mg_pll_bias_mask, hw_state->mg_pll_bias);

	intel_de_rmw(display, MG_PLL_TDC_COLDST_BIAS(tc_port),
		     hw_state->mg_pll_tdc_coldst_bias_mask,
		     hw_state->mg_pll_tdc_coldst_bias);

	/* Posting read to flush the preceding register writes. */
	intel_de_posting_read(display, MG_PLL_TDC_COLDST_BIAS(tc_port));
}
3894 
/* Program the Dekel (DKL) PHY PLL registers for @pll from @hw_state. */
static void dkl_pll_write(struct intel_display *display,
			  struct intel_dpll *pll,
			  const struct icl_dpll_hw_state *hw_state)
{
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
	u32 val;

	/*
	 * All registers programmed here have the same HIP_INDEX_REG even
	 * though on different building block
	 */
	/* All the registers are RMW */
	val = intel_dkl_phy_read(display, DKL_REFCLKIN_CTL(tc_port));
	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
	val |= hw_state->mg_refclkin_ctl;
	intel_dkl_phy_write(display, DKL_REFCLKIN_CTL(tc_port), val);

	val = intel_dkl_phy_read(display, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
	val |= hw_state->mg_clktop2_coreclkctl1;
	intel_dkl_phy_write(display, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);

	val = intel_dkl_phy_read(display, DKL_CLKTOP2_HSCLKCTL(tc_port));
	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
	val |= hw_state->mg_clktop2_hsclkctl;
	intel_dkl_phy_write(display, DKL_CLKTOP2_HSCLKCTL(tc_port), val);

	/* The AFC startup bits are only written on a VBT override. */
	val = DKL_PLL_DIV0_MASK;
	if (display->vbt.override_afc_startup)
		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
	intel_dkl_phy_rmw(display, DKL_PLL_DIV0(tc_port), val,
			  hw_state->mg_pll_div0);

	val = intel_dkl_phy_read(display, DKL_PLL_DIV1(tc_port));
	val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
		 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
	val |= hw_state->mg_pll_div1;
	intel_dkl_phy_write(display, DKL_PLL_DIV1(tc_port), val);

	val = intel_dkl_phy_read(display, DKL_PLL_SSC(tc_port));
	val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
		 DKL_PLL_SSC_STEP_LEN_MASK |
		 DKL_PLL_SSC_STEP_NUM_MASK |
		 DKL_PLL_SSC_EN);
	val |= hw_state->mg_pll_ssc;
	intel_dkl_phy_write(display, DKL_PLL_SSC(tc_port), val);

	val = intel_dkl_phy_read(display, DKL_PLL_BIAS(tc_port));
	val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
		 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
	val |= hw_state->mg_pll_bias;
	intel_dkl_phy_write(display, DKL_PLL_BIAS(tc_port), val);

	val = intel_dkl_phy_read(display, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
		 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
	val |= hw_state->mg_pll_tdc_coldst_bias;
	intel_dkl_phy_write(display, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);

	/* Posting read to flush the preceding register writes. */
	intel_dkl_phy_posting_read(display, DKL_PLL_TDC_COLDST_BIAS(tc_port));
}
3959 
3960 static void icl_pll_power_enable(struct intel_display *display,
3961 				 struct intel_dpll *pll,
3962 				 i915_reg_t enable_reg)
3963 {
3964 	intel_de_rmw(display, enable_reg, 0, PLL_POWER_ENABLE);
3965 
3966 	/*
3967 	 * The spec says we need to "wait" but it also says it should be
3968 	 * immediate.
3969 	 */
3970 	if (intel_de_wait_for_set_ms(display, enable_reg, PLL_POWER_STATE, 1))
3971 		drm_err(display->drm, "PLL %d Power not enabled\n",
3972 			pll->info->id);
3973 }
3974 
3975 static void icl_pll_enable(struct intel_display *display,
3976 			   struct intel_dpll *pll,
3977 			   i915_reg_t enable_reg)
3978 {
3979 	intel_de_rmw(display, enable_reg, 0, PLL_ENABLE);
3980 
3981 	/* Timeout is actually 600us. */
3982 	if (intel_de_wait_for_set_ms(display, enable_reg, PLL_LOCK, 1))
3983 		drm_err(display->drm, "PLL %d not locked\n", pll->info->id);
3984 }
3985 
/*
 * Disable CMTG clock gating on ADL-P A0 steppings (Wa_16011069516).
 * Only applies while DPLL0 is being enabled; a no-op everywhere else.
 */
static void adlp_cmtg_clock_gating_wa(struct intel_display *display, struct intel_dpll *pll)
{
	u32 val;

	if (!(display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) ||
	    pll->info->id != DPLL_ID_ICL_DPLL0)
		return;
	/*
	 * Wa_16011069516:adl-p[a0]
	 *
	 * All CMTG regs are unreliable until CMTG clock gating is disabled,
	 * so we can only assume the default TRANS_CMTG_CHICKEN reg value and
	 * sanity check this assumption with a double read, which presumably
	 * returns the correct value even with clock gating on.
	 *
	 * Instead of the usual place for workarounds we apply this one here,
	 * since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled.
	 */
	/* First read of the double read; the value itself is discarded. */
	val = intel_de_read(display, TRANS_CMTG_CHICKEN);
	/* intel_de_rmw() returns the pre-modification register value. */
	val = intel_de_rmw(display, TRANS_CMTG_CHICKEN, ~0, DISABLE_DPT_CLK_GATING);
	if (drm_WARN_ON(display->drm, val & ~DISABLE_DPT_CLK_GATING))
		drm_dbg_kms(display->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val);
}
4009 
4010 static void combo_pll_enable(struct intel_display *display,
4011 			     struct intel_dpll *pll,
4012 			     const struct intel_dpll_hw_state *dpll_hw_state)
4013 {
4014 	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
4015 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(display, pll);
4016 
4017 	icl_pll_power_enable(display, pll, enable_reg);
4018 
4019 	icl_dpll_write(display, pll, hw_state);
4020 
4021 	/*
4022 	 * DVFS pre sequence would be here, but in our driver the cdclk code
4023 	 * paths should already be setting the appropriate voltage, hence we do
4024 	 * nothing here.
4025 	 */
4026 
4027 	icl_pll_enable(display, pll, enable_reg);
4028 
4029 	adlp_cmtg_clock_gating_wa(display, pll);
4030 
4031 	/* DVFS post sequence would be here. See the comment above. */
4032 }
4033 
4034 static void icl_tbt_pll_enable(struct intel_display *display,
4035 			       struct intel_dpll *pll,
4036 			       const struct intel_dpll_hw_state *dpll_hw_state)
4037 {
4038 	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
4039 
4040 	icl_pll_power_enable(display, pll, TBT_PLL_ENABLE);
4041 
4042 	icl_dpll_write(display, pll, hw_state);
4043 
4044 	/*
4045 	 * DVFS pre sequence would be here, but in our driver the cdclk code
4046 	 * paths should already be setting the appropriate voltage, hence we do
4047 	 * nothing here.
4048 	 */
4049 
4050 	icl_pll_enable(display, pll, TBT_PLL_ENABLE);
4051 
4052 	/* DVFS post sequence would be here. See the comment above. */
4053 }
4054 
4055 static void mg_pll_enable(struct intel_display *display,
4056 			  struct intel_dpll *pll,
4057 			  const struct intel_dpll_hw_state *dpll_hw_state)
4058 {
4059 	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
4060 	i915_reg_t enable_reg = intel_tc_pll_enable_reg(display, pll);
4061 
4062 	icl_pll_power_enable(display, pll, enable_reg);
4063 
4064 	if (DISPLAY_VER(display) >= 12)
4065 		dkl_pll_write(display, pll, hw_state);
4066 	else
4067 		icl_mg_pll_write(display, pll, hw_state);
4068 
4069 	/*
4070 	 * DVFS pre sequence would be here, but in our driver the cdclk code
4071 	 * paths should already be setting the appropriate voltage, hence we do
4072 	 * nothing here.
4073 	 */
4074 
4075 	icl_pll_enable(display, pll, enable_reg);
4076 
4077 	/* DVFS post sequence would be here. See the comment above. */
4078 }
4079 
/*
 * Disable an ICL-family PLL: clear the enable bit, wait for unlock, then
 * power it down and wait for the power state to clear. The sequence order
 * follows the hardware spec and must not be changed.
 */
static void icl_pll_disable(struct intel_display *display,
			    struct intel_dpll *pll,
			    i915_reg_t enable_reg)
{
	/* The first steps are done by intel_ddi_post_disable(). */

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	intel_de_rmw(display, enable_reg, PLL_ENABLE, 0);

	/* Timeout is actually 1us. */
	if (intel_de_wait_for_clear_ms(display, enable_reg, PLL_LOCK, 1))
		drm_err(display->drm, "PLL %d locked\n", pll->info->id);

	/* DVFS post sequence would be here. See the comment above. */

	intel_de_rmw(display, enable_reg, PLL_POWER_ENABLE, 0);

	/*
	 * The spec says we need to "wait" but it also says it should be
	 * immediate.
	 */
	if (intel_de_wait_for_clear_ms(display, enable_reg, PLL_POWER_STATE, 1))
		drm_err(display->drm, "PLL %d Power not disabled\n",
			pll->info->id);
}
4110 
/* Disable a combo PHY PLL via its per-PLL enable register. */
static void combo_pll_disable(struct intel_display *display,
			      struct intel_dpll *pll)
{
	icl_pll_disable(display, pll,
			intel_combo_pll_enable_reg(display, pll));
}
4118 
/* Disable the Thunderbolt PLL via its fixed enable register. */
static void icl_tbt_pll_disable(struct intel_display *display,
				struct intel_dpll *pll)
{
	icl_pll_disable(display, pll, TBT_PLL_ENABLE);
}
4124 
/* Disable a Type-C port PLL via its per-port enable register. */
static void mg_pll_disable(struct intel_display *display,
			   struct intel_dpll *pll)
{
	icl_pll_disable(display, pll,
			intel_tc_pll_enable_reg(display, pll));
}
4132 
/* Refresh the non-SSC DPLL reference clock from the current cdclk state. */
static void icl_update_dpll_ref_clks(struct intel_display *display)
{
	/* No SSC ref */
	display->dpll.ref_clks.nssc = display->cdclk.hw.ref;
}
4138 
4139 static void icl_dump_hw_state(struct drm_printer *p,
4140 			      const struct intel_dpll_hw_state *dpll_hw_state)
4141 {
4142 	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
4143 
4144 	drm_printf(p, "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, "
4145 		   "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
4146 		   "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
4147 		   "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
4148 		   "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
4149 		   "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
4150 		   hw_state->cfgcr0, hw_state->cfgcr1, hw_state->div0,
4151 		   hw_state->mg_refclkin_ctl,
4152 		   hw_state->mg_clktop2_coreclkctl1,
4153 		   hw_state->mg_clktop2_hsclkctl,
4154 		   hw_state->mg_pll_div0,
4155 		   hw_state->mg_pll_div1,
4156 		   hw_state->mg_pll_lf,
4157 		   hw_state->mg_pll_frac_lock,
4158 		   hw_state->mg_pll_ssc,
4159 		   hw_state->mg_pll_bias,
4160 		   hw_state->mg_pll_tdc_coldst_bias);
4161 }
4162 
4163 static bool icl_compare_hw_state(const struct intel_dpll_hw_state *_a,
4164 				 const struct intel_dpll_hw_state *_b)
4165 {
4166 	const struct icl_dpll_hw_state *a = &_a->icl;
4167 	const struct icl_dpll_hw_state *b = &_b->icl;
4168 
4169 	/* FIXME split combo vs. mg more thoroughly */
4170 	return a->cfgcr0 == b->cfgcr0 &&
4171 		a->cfgcr1 == b->cfgcr1 &&
4172 		a->div0 == b->div0 &&
4173 		a->mg_refclkin_ctl == b->mg_refclkin_ctl &&
4174 		a->mg_clktop2_coreclkctl1 == b->mg_clktop2_coreclkctl1 &&
4175 		a->mg_clktop2_hsclkctl == b->mg_clktop2_hsclkctl &&
4176 		a->mg_pll_div0 == b->mg_pll_div0 &&
4177 		a->mg_pll_div1 == b->mg_pll_div1 &&
4178 		a->mg_pll_lf == b->mg_pll_lf &&
4179 		a->mg_pll_frac_lock == b->mg_pll_frac_lock &&
4180 		a->mg_pll_ssc == b->mg_pll_ssc &&
4181 		a->mg_pll_bias == b->mg_pll_bias &&
4182 		a->mg_pll_tdc_coldst_bias == b->mg_pll_tdc_coldst_bias;
4183 }
4184 
/* Combo PHY PLL vfuncs (ICL..ADL). */
static const struct intel_dpll_funcs combo_pll_funcs = {
	.enable = combo_pll_enable,
	.disable = combo_pll_disable,
	.get_hw_state = combo_pll_get_hw_state,
	.get_freq = icl_ddi_combo_pll_get_freq,
};

/* Thunderbolt PLL vfuncs (ICL..ADL). */
static const struct intel_dpll_funcs icl_tbt_pll_funcs = {
	.enable = icl_tbt_pll_enable,
	.disable = icl_tbt_pll_disable,
	.get_hw_state = icl_tbt_pll_get_hw_state,
	.get_freq = icl_ddi_tbt_pll_get_freq,
};

/* ICL MG PHY Type-C PLL vfuncs. */
static const struct intel_dpll_funcs mg_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = mg_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};
4205 
/* Ice Lake: combo DPLL0/1, TBT PLL, and four MG PHY Type-C PLLs. */
static const struct dpll_info icl_plls[] = {
	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
	{ .name = "TBT PLL", .funcs = &icl_tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
	  .is_alt_port_dpll = true, },
	{ .name = "MG PLL 1", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
	{ .name = "MG PLL 2", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
	{ .name = "MG PLL 3", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
	{ .name = "MG PLL 4", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
	{}
};

static const struct intel_dpll_mgr icl_pll_mgr = {
	.dpll_info = icl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
	.compare_hw_state = icl_compare_hw_state,
};

/* Elkhart Lake / Jasper Lake: combo PLLs only, DPLL4 needs DC_OFF. */
static const struct dpll_info ehl_plls[] = {
	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
	{ .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4,
	  .power_domain = POWER_DOMAIN_DC_OFF, },
	{}
};

static const struct intel_dpll_mgr ehl_pll_mgr = {
	.dpll_info = ehl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
	.compare_hw_state = icl_compare_hw_state,
};
4246 
/* Dekel PHY Type-C PLL vfuncs (TGL+); shares enable/disable with MG. */
static const struct intel_dpll_funcs dkl_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = dkl_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};

/* Tiger Lake: combo DPLL0/1, TBT PLL, and six Dekel Type-C PLLs. */
static const struct dpll_info tgl_plls[] = {
	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
	{ .name = "TBT PLL", .funcs = &icl_tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
	  .is_alt_port_dpll = true, },
	{ .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
	{ .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
	{ .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
	{ .name = "TC PLL 4", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
	{ .name = "TC PLL 5", .funcs = &dkl_pll_funcs, .id = DPLL_ID_TGL_MGPLL5, },
	{ .name = "TC PLL 6", .funcs = &dkl_pll_funcs, .id = DPLL_ID_TGL_MGPLL6, },
	{}
};

static const struct intel_dpll_mgr tgl_pll_mgr = {
	.dpll_info = tgl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
	.compare_hw_state = icl_compare_hw_state,
};
4278 
/* Rocket Lake: combo PLLs only. */
static const struct dpll_info rkl_plls[] = {
	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
	{ .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4, },
	{}
};

static const struct intel_dpll_mgr rkl_pll_mgr = {
	.dpll_info = rkl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
	.compare_hw_state = icl_compare_hw_state,
};

/* DG1: four combo PLLs. */
static const struct dpll_info dg1_plls[] = {
	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL0, },
	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL1, },
	{ .name = "DPLL 2", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL2, },
	{ .name = "DPLL 3", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL3, },
	{}
};

static const struct intel_dpll_mgr dg1_pll_mgr = {
	.dpll_info = dg1_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
	.compare_hw_state = icl_compare_hw_state,
};

/* Alder Lake-S: four combo PLLs. */
static const struct dpll_info adls_plls[] = {
	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
	{ .name = "DPLL 2", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL2, },
	{ .name = "DPLL 3", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL3, },
	{}
};

static const struct intel_dpll_mgr adls_pll_mgr = {
	.dpll_info = adls_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
	.compare_hw_state = icl_compare_hw_state,
};
4331 
/* Alder Lake-P: combo DPLL0/1, TBT PLL, and four Dekel Type-C PLLs. */
static const struct dpll_info adlp_plls[] = {
	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
	{ .name = "TBT PLL", .funcs = &icl_tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
	  .is_alt_port_dpll = true, },
	{ .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
	{ .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
	{ .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
	{ .name = "TC PLL 4", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
	{}
};

static const struct intel_dpll_mgr adlp_pll_mgr = {
	.dpll_info = adlp_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
	.compare_hw_state = icl_compare_hw_state,
};
4354 
4355 static struct intel_encoder *get_intel_encoder(struct intel_display *display,
4356 					       const struct intel_dpll *pll)
4357 {
4358 	struct intel_encoder *encoder;
4359 	enum intel_dpll_id mtl_id;
4360 
4361 	for_each_intel_encoder(display->drm, encoder) {
4362 		mtl_id = mtl_port_to_pll_id(display, encoder->port);
4363 
4364 		if (mtl_id == pll->info->id)
4365 			return encoder;
4366 	}
4367 
4368 	return NULL;
4369 }
4370 
4371 static bool mtl_pll_get_hw_state(struct intel_display *display,
4372 				 struct intel_dpll *pll,
4373 				 struct intel_dpll_hw_state *dpll_hw_state)
4374 {
4375 	struct intel_encoder *encoder = get_intel_encoder(display, pll);
4376 
4377 	if (!encoder)
4378 		return false;
4379 
4380 	return intel_cx0pll_readout_hw_state(encoder, &dpll_hw_state->cx0pll);
4381 }
4382 
4383 static int mtl_pll_get_freq(struct intel_display *display,
4384 			    const struct intel_dpll *pll,
4385 			    const struct intel_dpll_hw_state *dpll_hw_state)
4386 {
4387 	struct intel_encoder *encoder = get_intel_encoder(display, pll);
4388 
4389 	if (drm_WARN_ON(display->drm, !encoder))
4390 		return -EINVAL;
4391 
4392 	return intel_cx0pll_calc_port_clock(encoder, &dpll_hw_state->cx0pll);
4393 }
4394 
4395 static void mtl_pll_enable(struct intel_display *display,
4396 			   struct intel_dpll *pll,
4397 			   const struct intel_dpll_hw_state *dpll_hw_state)
4398 {
4399 	struct intel_encoder *encoder = get_intel_encoder(display, pll);
4400 
4401 	if (drm_WARN_ON(display->drm, !encoder))
4402 		return;
4403 
4404 	intel_mtl_pll_enable(encoder, pll, dpll_hw_state);
4405 }
4406 
4407 static void mtl_pll_disable(struct intel_display *display,
4408 			    struct intel_dpll *pll)
4409 {
4410 	struct intel_encoder *encoder = get_intel_encoder(display, pll);
4411 
4412 	if (drm_WARN_ON(display->drm, !encoder))
4413 		return;
4414 
4415 	intel_mtl_pll_disable(encoder);
4416 }
4417 
/* MTL+ CX0 PHY PLL vfuncs. */
static const struct intel_dpll_funcs mtl_pll_funcs = {
	.enable = mtl_pll_enable,
	.disable = mtl_pll_disable,
	.get_hw_state = mtl_pll_get_hw_state,
	.get_freq = mtl_pll_get_freq,
};
4424 
/*
 * No-op: the MTL TBT PLL is modeled as always on (see .always_on in
 * mtl_plls[]), so there is nothing to do on enable.
 */
static void mtl_tbt_pll_enable(struct intel_display *display,
			       struct intel_dpll *pll,
			       const struct intel_dpll_hw_state *hw_state)
{
}

/* No-op for the same reason as mtl_tbt_pll_enable(). */
static void mtl_tbt_pll_disable(struct intel_display *display,
				struct intel_dpll *pll)
{
}

/*
 * Intentionally unanswerable: warns and returns 0 since no single output
 * frequency can be attributed to the TBT PLL.
 */
static int mtl_tbt_pll_get_freq(struct intel_display *display,
				const struct intel_dpll *pll,
				const struct intel_dpll_hw_state *dpll_hw_state)
{
	/*
	 * The PLL outputs multiple frequencies at the same time, selection is
	 * made at DDI clock mux level.
	 */
	drm_WARN_ON(display->drm, 1);

	return 0;
}
4448 
/* MTL+ TBT PLL vfuncs: always-on, so enable/disable are stubs. */
static const struct intel_dpll_funcs mtl_tbt_pll_funcs = {
	.enable = mtl_tbt_pll_enable,
	.disable = mtl_tbt_pll_disable,
	.get_hw_state = intel_mtl_tbt_pll_readout_hw_state,
	.get_freq = mtl_tbt_pll_get_freq,
};

/* Meteor Lake: CX0 PHY PLLs plus an always-on TBT PLL. */
static const struct dpll_info mtl_plls[] = {
	{ .name = "DPLL 0", .funcs = &mtl_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
	{ .name = "DPLL 1", .funcs = &mtl_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
	{ .name = "TBT PLL", .funcs = &mtl_tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
	  .is_alt_port_dpll = true, .always_on = true },
	{ .name = "TC PLL 1", .funcs = &mtl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
	{ .name = "TC PLL 2", .funcs = &mtl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
	{ .name = "TC PLL 3", .funcs = &mtl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
	{ .name = "TC PLL 4", .funcs = &mtl_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
	{}
};
4467 
4468 /*
4469  * Compute the state for either a C10 PHY PLL, or in the case of the PTL port B,
4470  * eDP on TypeC PHY case for a C20 PHY PLL.
4471  */
4472 static int mtl_compute_non_tc_phy_dpll(struct intel_atomic_state *state,
4473 				       struct intel_crtc *crtc,
4474 				       struct intel_encoder *encoder)
4475 {
4476 	struct intel_crtc_state *crtc_state =
4477 		intel_atomic_get_new_crtc_state(state, crtc);
4478 	struct icl_port_dpll *port_dpll =
4479 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
4480 	int ret;
4481 
4482 	ret = intel_cx0pll_calc_state(crtc_state, encoder, &port_dpll->hw_state);
4483 	if (ret)
4484 		return ret;
4485 
4486 	/* this is mainly for the fastset check */
4487 	icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
4488 
4489 	crtc_state->port_clock = intel_cx0pll_calc_port_clock(encoder,
4490 							      &port_dpll->hw_state.cx0pll);
4491 
4492 	return 0;
4493 }
4494 
4495 static int mtl_compute_tc_phy_dplls(struct intel_atomic_state *state,
4496 				    struct intel_crtc *crtc,
4497 				    struct intel_encoder *encoder)
4498 {
4499 	struct intel_crtc_state *crtc_state =
4500 		intel_atomic_get_new_crtc_state(state, crtc);
4501 	const struct intel_crtc_state *old_crtc_state =
4502 		intel_atomic_get_old_crtc_state(state, crtc);
4503 	struct icl_port_dpll *port_dpll;
4504 	int ret;
4505 
4506 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
4507 	intel_mtl_tbt_pll_calc_state(&port_dpll->hw_state);
4508 
4509 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
4510 	ret = intel_cx0pll_calc_state(crtc_state, encoder, &port_dpll->hw_state);
4511 	if (ret)
4512 		return ret;
4513 
4514 	/* this is mainly for the fastset check */
4515 	if (old_crtc_state->intel_dpll &&
4516 	    old_crtc_state->intel_dpll->info->id == DPLL_ID_ICL_TBTPLL)
4517 		icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
4518 	else
4519 		icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);
4520 
4521 	crtc_state->port_clock = intel_cx0pll_calc_port_clock(encoder,
4522 							      &port_dpll->hw_state.cx0pll);
4523 
4524 	return 0;
4525 }
4526 
/* Dispatch DPLL state computation based on whether the port is Type-C. */
static int mtl_compute_dplls(struct intel_atomic_state *state,
			     struct intel_crtc *crtc,
			     struct intel_encoder *encoder)
{
	if (!intel_encoder_is_tc(encoder))
		return mtl_compute_non_tc_phy_dpll(state, crtc, encoder);

	return mtl_compute_tc_phy_dplls(state, crtc, encoder);
}
4536 
/* Dispatch DPLL reservation based on whether the port is Type-C. */
static int mtl_get_dplls(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder)
{
	if (!intel_encoder_is_tc(encoder))
		return mtl_get_non_tc_phy_dpll(state, crtc, encoder);

	return icl_get_tc_phy_dplls(state, crtc, encoder);
}
4546 
/* Dump the CX0 PHY PLL hw state for debugging. */
static void mtl_dump_hw_state(struct drm_printer *p,
			      const struct intel_dpll_hw_state *dpll_hw_state)
{
	intel_cx0pll_dump_hw_state(p, &dpll_hw_state->cx0pll);
}
4552 
4553 static bool mtl_compare_hw_state(const struct intel_dpll_hw_state *_a,
4554 				 const struct intel_dpll_hw_state *_b)
4555 {
4556 	const struct intel_cx0pll_state *a = &_a->cx0pll;
4557 	const struct intel_cx0pll_state *b = &_b->cx0pll;
4558 
4559 	return intel_cx0pll_compare_hw_state(a, b);
4560 }
4561 
/* Meteor Lake+ DPLL manager (display ver 14+, except DG2/NVL). */
static const struct intel_dpll_mgr mtl_pll_mgr = {
	.dpll_info = mtl_plls,
	.compute_dplls = mtl_compute_dplls,
	.get_dplls = mtl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = mtl_dump_hw_state,
	.compare_hw_state = mtl_compare_hw_state,
};
4572 
4573 /**
4574  * intel_dpll_init - Initialize DPLLs
4575  * @display: intel_display device
4576  *
4577  * Initialize DPLLs for @display.
4578  */
4579 void intel_dpll_init(struct intel_display *display)
4580 {
4581 	const struct intel_dpll_mgr *dpll_mgr = NULL;
4582 	const struct dpll_info *dpll_info;
4583 	int i;
4584 
4585 	mutex_init(&display->dpll.lock);
4586 
4587 	if (DISPLAY_VER(display) >= 35 || display->platform.dg2)
4588 		/* No shared DPLLs on NVL or DG2; port PLLs are part of the PHY */
4589 		dpll_mgr = NULL;
4590 	else if (DISPLAY_VER(display) >= 14)
4591 		dpll_mgr = &mtl_pll_mgr;
4592 	else if (display->platform.alderlake_p)
4593 		dpll_mgr = &adlp_pll_mgr;
4594 	else if (display->platform.alderlake_s)
4595 		dpll_mgr = &adls_pll_mgr;
4596 	else if (display->platform.dg1)
4597 		dpll_mgr = &dg1_pll_mgr;
4598 	else if (display->platform.rocketlake)
4599 		dpll_mgr = &rkl_pll_mgr;
4600 	else if (DISPLAY_VER(display) >= 12)
4601 		dpll_mgr = &tgl_pll_mgr;
4602 	else if (display->platform.jasperlake || display->platform.elkhartlake)
4603 		dpll_mgr = &ehl_pll_mgr;
4604 	else if (DISPLAY_VER(display) >= 11)
4605 		dpll_mgr = &icl_pll_mgr;
4606 	else if (display->platform.geminilake || display->platform.broxton)
4607 		dpll_mgr = &bxt_pll_mgr;
4608 	else if (DISPLAY_VER(display) == 9)
4609 		dpll_mgr = &skl_pll_mgr;
4610 	else if (HAS_DDI(display))
4611 		dpll_mgr = &hsw_pll_mgr;
4612 	else if (HAS_PCH_IBX(display) || HAS_PCH_CPT(display))
4613 		dpll_mgr = &pch_pll_mgr;
4614 
4615 	if (!dpll_mgr)
4616 		return;
4617 
4618 	dpll_info = dpll_mgr->dpll_info;
4619 
4620 	for (i = 0; dpll_info[i].name; i++) {
4621 		if (drm_WARN_ON(display->drm,
4622 				i >= ARRAY_SIZE(display->dpll.dplls)))
4623 			break;
4624 
4625 		/* must fit into unsigned long bitmask on 32bit */
4626 		if (drm_WARN_ON(display->drm, dpll_info[i].id >= 32))
4627 			break;
4628 
4629 		display->dpll.dplls[i].info = &dpll_info[i];
4630 		display->dpll.dplls[i].index = i;
4631 	}
4632 
4633 	display->dpll.mgr = dpll_mgr;
4634 	display->dpll.num_dpll = i;
4635 }
4636 
4637 /**
4638  * intel_dpll_compute - compute DPLL state CRTC and encoder combination
4639  * @state: atomic state
4640  * @crtc: CRTC to compute DPLLs for
4641  * @encoder: encoder
4642  *
4643  * This function computes the DPLL state for the given CRTC and encoder.
4644  *
4645  * The new configuration in the atomic commit @state is made effective by
4646  * calling intel_dpll_swap_state().
4647  *
4648  * Returns:
4649  * 0 on success, negative error code on failure.
4650  */
4651 int intel_dpll_compute(struct intel_atomic_state *state,
4652 		       struct intel_crtc *crtc,
4653 		       struct intel_encoder *encoder)
4654 {
4655 	struct intel_display *display = to_intel_display(state);
4656 	const struct intel_dpll_mgr *dpll_mgr = display->dpll.mgr;
4657 
4658 	if (drm_WARN_ON(display->drm, !dpll_mgr))
4659 		return -EINVAL;
4660 
4661 	return dpll_mgr->compute_dplls(state, crtc, encoder);
4662 }
4663 
4664 /**
4665  * intel_dpll_reserve - reserve DPLLs for CRTC and encoder combination
4666  * @state: atomic state
4667  * @crtc: CRTC to reserve DPLLs for
4668  * @encoder: encoder
4669  *
4670  * This function reserves all required DPLLs for the given CRTC and encoder
4671  * combination in the current atomic commit @state and the new @crtc atomic
4672  * state.
4673  *
4674  * The new configuration in the atomic commit @state is made effective by
4675  * calling intel_dpll_swap_state().
4676  *
4677  * The reserved DPLLs should be released by calling
4678  * intel_dpll_release().
4679  *
4680  * Returns:
4681  * 0 if all required DPLLs were successfully reserved,
4682  * negative error code otherwise.
4683  */
4684 int intel_dpll_reserve(struct intel_atomic_state *state,
4685 		       struct intel_crtc *crtc,
4686 		       struct intel_encoder *encoder)
4687 {
4688 	struct intel_display *display = to_intel_display(state);
4689 	const struct intel_dpll_mgr *dpll_mgr = display->dpll.mgr;
4690 
4691 	if (drm_WARN_ON(display->drm, !dpll_mgr))
4692 		return -EINVAL;
4693 
4694 	return dpll_mgr->get_dplls(state, crtc, encoder);
4695 }
4696 
4697 /**
4698  * intel_dpll_release - end use of DPLLs by CRTC in atomic state
4699  * @state: atomic state
4700  * @crtc: crtc from which the DPLLs are to be released
4701  *
4702  * This function releases all DPLLs reserved by intel_dpll_reserve()
4703  * from the current atomic commit @state and the old @crtc atomic state.
4704  *
4705  * The new configuration in the atomic commit @state is made effective by
4706  * calling intel_dpll_swap_state().
4707  */
4708 void intel_dpll_release(struct intel_atomic_state *state,
4709 			struct intel_crtc *crtc)
4710 {
4711 	struct intel_display *display = to_intel_display(state);
4712 	const struct intel_dpll_mgr *dpll_mgr = display->dpll.mgr;
4713 
4714 	/*
4715 	 * FIXME: this function is called for every platform having a
4716 	 * compute_clock hook, even though the platform doesn't yet support
4717 	 * the DPLL framework and intel_dpll_reserve() is not
4718 	 * called on those.
4719 	 */
4720 	if (!dpll_mgr)
4721 		return;
4722 
4723 	dpll_mgr->put_dplls(state, crtc);
4724 }
4725 
4726 /**
4727  * intel_dpll_update_active - update the active DPLL for a CRTC/encoder
4728  * @state: atomic state
4729  * @crtc: the CRTC for which to update the active DPLL
4730  * @encoder: encoder determining the type of port DPLL
4731  *
4732  * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
4733  * from the port DPLLs reserved previously by intel_dpll_reserve(). The
4734  * DPLL selected will be based on the current mode of the encoder's port.
4735  */
4736 void intel_dpll_update_active(struct intel_atomic_state *state,
4737 			      struct intel_crtc *crtc,
4738 			      struct intel_encoder *encoder)
4739 {
4740 	struct intel_display *display = to_intel_display(encoder);
4741 	const struct intel_dpll_mgr *dpll_mgr = display->dpll.mgr;
4742 
4743 	if (drm_WARN_ON(display->drm, !dpll_mgr))
4744 		return;
4745 
4746 	dpll_mgr->update_active_dpll(state, crtc, encoder);
4747 }
4748 
4749 /**
4750  * intel_dpll_get_freq - calculate the DPLL's output frequency
4751  * @display: intel_display device
4752  * @pll: DPLL for which to calculate the output frequency
4753  * @dpll_hw_state: DPLL state from which to calculate the output frequency
4754  *
4755  * Return the output frequency corresponding to @pll's passed in @dpll_hw_state.
4756  */
4757 int intel_dpll_get_freq(struct intel_display *display,
4758 			const struct intel_dpll *pll,
4759 			const struct intel_dpll_hw_state *dpll_hw_state)
4760 {
4761 	if (drm_WARN_ON(display->drm, !pll->info->funcs->get_freq))
4762 		return 0;
4763 
4764 	return pll->info->funcs->get_freq(display, pll, dpll_hw_state);
4765 }
4766 
4767 /**
4768  * intel_dpll_get_hw_state - readout the DPLL's hardware state
4769  * @display: intel_display device instance
4770  * @pll: DPLL for which to calculate the output frequency
4771  * @dpll_hw_state: DPLL's hardware state
4772  *
4773  * Read out @pll's hardware state into @dpll_hw_state.
4774  */
4775 bool intel_dpll_get_hw_state(struct intel_display *display,
4776 			     struct intel_dpll *pll,
4777 			     struct intel_dpll_hw_state *dpll_hw_state)
4778 {
4779 	return pll->info->funcs->get_hw_state(display, pll, dpll_hw_state);
4780 }
4781 
/*
 * Read out @pll's hardware state and rebuild the sw tracking
 * (on, state.pipe_mask, active_mask) from the currently active CRTCs.
 */
static void readout_dpll_hw_state(struct intel_display *display,
				  struct intel_dpll *pll)
{
	struct intel_crtc *crtc;

	pll->on = intel_dpll_get_hw_state(display, pll, &pll->state.hw_state);

	/* An enabled PLL with a power domain must hold a power reference. */
	if (pll->on && pll->info->power_domain)
		pll->wakeref = intel_display_power_get(display, pll->info->power_domain);

	/* Rebuild the pipe mask from the CRTCs currently using this PLL. */
	pll->state.pipe_mask = 0;
	for_each_intel_crtc(display->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		if (crtc_state->hw.active && crtc_state->intel_dpll == pll)
			intel_dpll_crtc_get(crtc, pll, &pll->state);
	}
	pll->active_mask = pll->state.pipe_mask;

	drm_dbg_kms(display->drm,
		    "%s hw state readout: pipe_mask 0x%x, on %i\n",
		    pll->info->name, pll->state.pipe_mask, pll->on);
}
4806 
4807 void intel_dpll_update_ref_clks(struct intel_display *display)
4808 {
4809 	if (display->dpll.mgr && display->dpll.mgr->update_ref_clks)
4810 		display->dpll.mgr->update_ref_clks(display);
4811 }
4812 
/* Read out the hw state of all DPLLs, rebuilding the sw tracking for each. */
void intel_dpll_readout_hw_state(struct intel_display *display)
{
	struct intel_dpll *pll;
	int i;

	for_each_dpll(display, pll, i)
		readout_dpll_hw_state(display, pll);
}
4821 
/*
 * Disable a PLL that is on in hardware but not used by any active pipe.
 * Note that the ADL-P CMTG clock gating workaround is applied to every
 * PLL that is on, including ones that remain in use.
 */
static void sanitize_dpll_state(struct intel_display *display,
				struct intel_dpll *pll)
{
	if (!pll->on)
		return;

	adlp_cmtg_clock_gating_wa(display, pll);

	/* Still in use by at least one pipe, leave it enabled. */
	if (pll->active_mask)
		return;

	drm_dbg_kms(display->drm,
		    "%s enabled but not in use, disabling\n",
		    pll->info->name);

	_intel_disable_shared_dpll(display, pll);
}
4839 
/*
 * Sanitize the state of all DPLLs: apply the CX0 PLL power save
 * workaround and disable any PLL that is enabled but unused.
 */
void intel_dpll_sanitize_state(struct intel_display *display)
{
	struct intel_dpll *pll;
	int i;

	intel_cx0_pll_power_save_wa(display);

	for_each_dpll(display, pll, i)
		sanitize_dpll_state(display, pll);
}
4850 
4851 /**
4852  * intel_dpll_dump_hw_state - dump hw_state
4853  * @display: intel_display structure
4854  * @p: where to print the state to
4855  * @dpll_hw_state: hw state to be dumped
4856  *
4857  * Dumo out the relevant values in @dpll_hw_state.
4858  */
4859 void intel_dpll_dump_hw_state(struct intel_display *display,
4860 			      struct drm_printer *p,
4861 			      const struct intel_dpll_hw_state *dpll_hw_state)
4862 {
4863 	if (display->dpll.mgr) {
4864 		display->dpll.mgr->dump_hw_state(p, dpll_hw_state);
4865 	} else {
4866 		/* fallback for platforms that don't use the shared dpll
4867 		 * infrastructure
4868 		 */
4869 		ibx_dump_hw_state(p, dpll_hw_state);
4870 	}
4871 }
4872 
4873 /**
4874  * intel_dpll_compare_hw_state - compare the two states
4875  * @display: intel_display structure
4876  * @a: first DPLL hw state
4877  * @b: second DPLL hw state
4878  *
4879  * Compare DPLL hw states @a and @b.
4880  *
4881  * Returns: true if the states are equal, false if the differ
4882  */
4883 bool intel_dpll_compare_hw_state(struct intel_display *display,
4884 				 const struct intel_dpll_hw_state *a,
4885 				 const struct intel_dpll_hw_state *b)
4886 {
4887 	if (display->dpll.mgr) {
4888 		return display->dpll.mgr->compare_hw_state(a, b);
4889 	} else {
4890 		/* fallback for platforms that don't use the shared dpll
4891 		 * infrastructure
4892 		 */
4893 		return ibx_compare_hw_state(a, b);
4894 	}
4895 }
4896 
/*
 * Cross-check a PLL's sw tracking (on, active_mask, state.pipe_mask and
 * cached hw state) against the state read back from hardware. With a
 * NULL @crtc only the PLL's global bookkeeping is verified; otherwise
 * @crtc's membership in the PLL's masks is checked against
 * @new_crtc_state->hw.active.
 */
static void
verify_single_dpll_state(struct intel_display *display,
			 struct intel_dpll *pll,
			 struct intel_crtc *crtc,
			 const struct intel_crtc_state *new_crtc_state)
{
	struct intel_dpll_hw_state dpll_hw_state = {};
	u8 pipe_mask;
	bool active;

	active = intel_dpll_get_hw_state(display, pll, &dpll_hw_state);

	/* The on/off consistency checks are meaningless for always-on PLLs. */
	if (!pll->info->always_on) {
		INTEL_DISPLAY_STATE_WARN(display, !pll->on && pll->active_mask,
					 "%s: pll in active use but not on in sw tracking\n",
					 pll->info->name);
		INTEL_DISPLAY_STATE_WARN(display, pll->on && !pll->active_mask,
					 "%s: pll is on but not used by any active pipe\n",
					 pll->info->name);
		INTEL_DISPLAY_STATE_WARN(display, pll->on != active,
					 "%s: pll on state mismatch (expected %i, found %i)\n",
					 pll->info->name, pll->on, active);
	}

	if (!crtc) {
		/* Every active user of the PLL must also hold a reference. */
		INTEL_DISPLAY_STATE_WARN(display,
					 pll->active_mask & ~pll->state.pipe_mask,
					 "%s: more active pll users than references: 0x%x vs 0x%x\n",
					 pll->info->name, pll->active_mask, pll->state.pipe_mask);

		return;
	}

	pipe_mask = BIT(crtc->pipe);

	if (new_crtc_state->hw.active)
		INTEL_DISPLAY_STATE_WARN(display, !(pll->active_mask & pipe_mask),
					 "%s: pll active mismatch (expected pipe %c in active mask 0x%x)\n",
					 pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
	else
		INTEL_DISPLAY_STATE_WARN(display, pll->active_mask & pipe_mask,
					 "%s: pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
					 pll->info->name, pipe_name(crtc->pipe), pll->active_mask);

	INTEL_DISPLAY_STATE_WARN(display, !(pll->state.pipe_mask & pipe_mask),
				 "%s: pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
				 pll->info->name, pipe_mask, pll->state.pipe_mask);

	/* On a hw/sw state mismatch, dump both states to aid debugging. */
	if (INTEL_DISPLAY_STATE_WARN(display,
				     pll->on && memcmp(&pll->state.hw_state, &dpll_hw_state,
						       sizeof(dpll_hw_state)),
				     "%s: pll hw state mismatch\n",
				     pll->info->name)) {
		struct drm_printer p = drm_dbg_printer(display->drm, DRM_UT_KMS, NULL);

		drm_printf(&p, "PLL %s HW state:\n", pll->info->name);
		intel_dpll_dump_hw_state(display, &p, &dpll_hw_state);
		drm_printf(&p, "PLL %s SW state:\n", pll->info->name);
		intel_dpll_dump_hw_state(display, &p, &pll->state.hw_state);
	}
}
4958 
4959 static bool has_alt_port_dpll(const struct intel_dpll *old_pll,
4960 			      const struct intel_dpll *new_pll)
4961 {
4962 	return old_pll && new_pll && old_pll != new_pll &&
4963 		(old_pll->info->is_alt_port_dpll || new_pll->info->is_alt_port_dpll);
4964 }
4965 
/*
 * Verify the DPLL sw/hw state for @crtc across a commit: check the new
 * PLL's state against @crtc, and verify @crtc has been dropped from the
 * tracking of a PLL it no longer uses.
 */
void intel_dpll_state_verify(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (new_crtc_state->intel_dpll)
		verify_single_dpll_state(display, new_crtc_state->intel_dpll,
					 crtc, new_crtc_state);

	/* The crtc switched PLLs: it must no longer be tracked by the old one. */
	if (old_crtc_state->intel_dpll &&
	    old_crtc_state->intel_dpll != new_crtc_state->intel_dpll) {
		u8 pipe_mask = BIT(crtc->pipe);
		struct intel_dpll *pll = old_crtc_state->intel_dpll;

		INTEL_DISPLAY_STATE_WARN(display, pll->active_mask & pipe_mask,
					 "%s: pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
					 pll->info->name, pipe_name(crtc->pipe), pll->active_mask);

		/* TC ports have both MG/TC and TBT PLL referenced simultaneously */
		INTEL_DISPLAY_STATE_WARN(display, !has_alt_port_dpll(old_crtc_state->intel_dpll,
								     new_crtc_state->intel_dpll) &&
					 pll->state.pipe_mask & pipe_mask,
					 "%s: pll enabled crtcs mismatch (found pipe %c in enabled mask (0x%x))\n",
					 pll->info->name, pipe_name(crtc->pipe), pll->state.pipe_mask);
	}
}
4996 
/* Verify the global bookkeeping of all DPLLs, without reference to any crtc. */
void intel_dpll_verify_disabled(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_dpll *pll;
	int i;

	for_each_dpll(display, pll, i)
		verify_single_dpll_state(display, pll, NULL, NULL);
}
5006