xref: /linux/drivers/gpu/drm/i915/display/intel_dpll_mgr.c (revision 07fdad3a93756b872da7b53647715c48d0f4a2d0)
1 /*
2  * Copyright © 2006-2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <linux/math.h>
25 #include <linux/string_helpers.h>
26 
27 #include <drm/drm_print.h>
28 
29 #include "bxt_dpio_phy_regs.h"
30 #include "i915_utils.h"
31 #include "intel_cx0_phy.h"
32 #include "intel_de.h"
33 #include "intel_display_regs.h"
34 #include "intel_display_types.h"
35 #include "intel_dkl_phy.h"
36 #include "intel_dkl_phy_regs.h"
37 #include "intel_dpio_phy.h"
38 #include "intel_dpll.h"
39 #include "intel_dpll_mgr.h"
40 #include "intel_hti.h"
41 #include "intel_mg_phy_regs.h"
42 #include "intel_pch_refclk.h"
43 #include "intel_step.h"
44 #include "intel_tc.h"
45 
46 /**
47  * DOC: Display PLLs
48  *
49  * Display PLLs used for driving outputs vary by platform. While some have
50  * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
51  * from a pool. In the latter scenario, it is possible that multiple pipes
52  * share a PLL if their configurations match.
53  *
54  * This file provides an abstraction over display PLLs. The function
55  * intel_dpll_init() initializes the PLLs for the given platform.  The
56  * users of a PLL are tracked and that tracking is integrated with the atomic
57  * modeset interface. During an atomic operation, required PLLs can be reserved
58  * for a given CRTC and encoder configuration by calling
59  * intel_dpll_reserve() and previously reserved PLLs can be released
60  * with intel_dpll_release().
61  * Changes to the users are first staged in the atomic state, and then made
62  * effective by calling intel_dpll_swap_state() during the atomic
63  * commit phase.
64  */
65 
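/*
 * A rough sketch of the flow described above (the exact entry points vary by
 * platform):
 *
 *   atomic check:  ->compute_dplls() fills crtc_state->dpll_hw_state, and
 *                  intel_dpll_reserve() (backed by the ->get_dplls() hook)
 *                  picks a PLL and references it in the staged DPLL state.
 *   atomic commit: intel_dpll_swap_state() makes the staged references
 *                  effective; intel_dpll_enable()/intel_dpll_disable() then
 *                  turn the hardware on/off as pipes start/stop using it.
 */
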
66 /* platform specific hooks for managing DPLLs */
67 struct intel_dpll_funcs {
68 	/*
69 	 * Hook for enabling the pll, called from intel_dpll_enable() if
70 	 * the pll is not already enabled.
71 	 */
72 	void (*enable)(struct intel_display *display,
73 		       struct intel_dpll *pll,
74 		       const struct intel_dpll_hw_state *dpll_hw_state);
75 
76 	/*
77 	 * Hook for disabling the pll, called from intel_dpll_disable()
78 	 * only when it is safe to disable the pll, i.e., there are no more
79 	 * tracked users for it.
80 	 */
81 	void (*disable)(struct intel_display *display,
82 			struct intel_dpll *pll);
83 
84 	/*
85 	 * Hook for reading the values currently programmed to the DPLL
86 	 * registers. This is used for initial hw state readout and state
87 	 * verification after a mode set.
88 	 */
89 	bool (*get_hw_state)(struct intel_display *display,
90 			     struct intel_dpll *pll,
91 			     struct intel_dpll_hw_state *dpll_hw_state);
92 
93 	/*
94 	 * Hook for calculating the pll's output frequency based on its passed
95 	 * in state.
96 	 */
97 	int (*get_freq)(struct intel_display *display,
98 			const struct intel_dpll *pll,
99 			const struct intel_dpll_hw_state *dpll_hw_state);
100 };
101 
102 struct intel_dpll_mgr {
103 	const struct dpll_info *dpll_info;
104 
105 	int (*compute_dplls)(struct intel_atomic_state *state,
106 			     struct intel_crtc *crtc,
107 			     struct intel_encoder *encoder);
108 	int (*get_dplls)(struct intel_atomic_state *state,
109 			 struct intel_crtc *crtc,
110 			 struct intel_encoder *encoder);
111 	void (*put_dplls)(struct intel_atomic_state *state,
112 			  struct intel_crtc *crtc);
113 	void (*update_active_dpll)(struct intel_atomic_state *state,
114 				   struct intel_crtc *crtc,
115 				   struct intel_encoder *encoder);
116 	void (*update_ref_clks)(struct intel_display *display);
117 	void (*dump_hw_state)(struct drm_printer *p,
118 			      const struct intel_dpll_hw_state *dpll_hw_state);
119 	bool (*compare_hw_state)(const struct intel_dpll_hw_state *a,
120 				 const struct intel_dpll_hw_state *b);
121 };
122 
123 static void
124 intel_atomic_duplicate_dpll_state(struct intel_display *display,
125 				  struct intel_dpll_state *dpll_state)
126 {
127 	struct intel_dpll *pll;
128 	int i;
129 
130 	/* Copy dpll state */
131 	for_each_dpll(display, pll, i)
132 		dpll_state[pll->index] = pll->state;
133 }
134 
135 static struct intel_dpll_state *
136 intel_atomic_get_dpll_state(struct drm_atomic_state *s)
137 {
138 	struct intel_atomic_state *state = to_intel_atomic_state(s);
139 	struct intel_display *display = to_intel_display(state);
140 
141 	drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
142 
143 	if (!state->dpll_set) {
144 		state->dpll_set = true;
145 
146 		intel_atomic_duplicate_dpll_state(display,
147 						  state->dpll_state);
148 	}
149 
150 	return state->dpll_state;
151 }
152 
153 /**
154  * intel_get_dpll_by_id - get a DPLL given its id
155  * @display: intel_display device instance
156  * @id: pll id
157  *
158  * Returns:
159  * A pointer to the DPLL with @id, or NULL if it is not found.
160  */
161 struct intel_dpll *
162 intel_get_dpll_by_id(struct intel_display *display,
163 		     enum intel_dpll_id id)
164 {
165 	struct intel_dpll *pll;
166 	int i;
167 
168 	for_each_dpll(display, pll, i) {
169 		if (pll->info->id == id)
170 			return pll;
171 	}
172 
173 	MISSING_CASE(id);
174 	return NULL;
175 }
176 
177 /* For ILK+ */
178 void assert_dpll(struct intel_display *display,
179 		 struct intel_dpll *pll,
180 		 bool state)
181 {
182 	bool cur_state;
183 	struct intel_dpll_hw_state hw_state;
184 
185 	if (drm_WARN(display->drm, !pll,
186 		     "asserting DPLL %s with no DPLL\n", str_on_off(state)))
187 		return;
188 
189 	cur_state = intel_dpll_get_hw_state(display, pll, &hw_state);
190 	INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
191 				 "%s assertion failure (expected %s, current %s)\n",
192 				 pll->info->name, str_on_off(state),
193 				 str_on_off(cur_state));
194 }
195 
196 static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
197 {
198 	return TC_PORT_1 + id - DPLL_ID_ICL_MGPLL1;
199 }
200 
201 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
202 {
203 	return tc_port - TC_PORT_1 + DPLL_ID_ICL_MGPLL1;
204 }
205 
206 static i915_reg_t
207 intel_combo_pll_enable_reg(struct intel_display *display,
208 			   struct intel_dpll *pll)
209 {
210 	if (display->platform.dg1)
211 		return DG1_DPLL_ENABLE(pll->info->id);
212 	else if ((display->platform.jasperlake || display->platform.elkhartlake) &&
213 		 (pll->info->id == DPLL_ID_EHL_DPLL4))
214 		return MG_PLL_ENABLE(0);
215 
216 	return ICL_DPLL_ENABLE(pll->info->id);
217 }
218 
219 static i915_reg_t
220 intel_tc_pll_enable_reg(struct intel_display *display,
221 			struct intel_dpll *pll)
222 {
223 	const enum intel_dpll_id id = pll->info->id;
224 	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
225 
226 	if (display->platform.alderlake_p)
227 		return ADLP_PORTTC_PLL_ENABLE(tc_port);
228 
229 	return MG_PLL_ENABLE(tc_port);
230 }
231 
232 static void _intel_enable_shared_dpll(struct intel_display *display,
233 				      struct intel_dpll *pll)
234 {
235 	if (pll->info->power_domain)
236 		pll->wakeref = intel_display_power_get(display, pll->info->power_domain);
237 
238 	pll->info->funcs->enable(display, pll, &pll->state.hw_state);
239 	pll->on = true;
240 }
241 
242 static void _intel_disable_shared_dpll(struct intel_display *display,
243 				       struct intel_dpll *pll)
244 {
245 	pll->info->funcs->disable(display, pll);
246 	pll->on = false;
247 
248 	if (pll->info->power_domain)
249 		intel_display_power_put(display, pll->info->power_domain, pll->wakeref);
250 }
251 
252 /**
253  * intel_dpll_enable - enable a CRTC's DPLL
254  * @crtc_state: CRTC, and its state, which has a DPLL
255  *
256  * Enable the DPLL used by @crtc_state.
257  */
258 void intel_dpll_enable(const struct intel_crtc_state *crtc_state)
259 {
260 	struct intel_display *display = to_intel_display(crtc_state);
261 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
262 	struct intel_dpll *pll = crtc_state->intel_dpll;
263 	unsigned int pipe_mask = intel_crtc_joined_pipe_mask(crtc_state);
264 	unsigned int old_mask;
265 
266 	if (drm_WARN_ON(display->drm, !pll))
267 		return;
268 
269 	mutex_lock(&display->dpll.lock);
270 	old_mask = pll->active_mask;
271 
272 	if (drm_WARN_ON(display->drm, !(pll->state.pipe_mask & pipe_mask)) ||
273 	    drm_WARN_ON(display->drm, pll->active_mask & pipe_mask))
274 		goto out;
275 
276 	pll->active_mask |= pipe_mask;
277 
278 	drm_dbg_kms(display->drm,
279 		    "enable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
280 		    pll->info->name, pll->active_mask, pll->on,
281 		    crtc->base.base.id, crtc->base.name);
282 
283 	if (old_mask) {
284 		drm_WARN_ON(display->drm, !pll->on);
285 		assert_dpll_enabled(display, pll);
286 		goto out;
287 	}
288 	drm_WARN_ON(display->drm, pll->on);
289 
290 	drm_dbg_kms(display->drm, "enabling %s\n", pll->info->name);
291 
292 	_intel_enable_shared_dpll(display, pll);
293 
294 out:
295 	mutex_unlock(&display->dpll.lock);
296 }
297 
298 /**
299  * intel_dpll_disable - disable a CRTC's DPLL
300  * @crtc_state: CRTC, and its state, which has a DPLL
301  *
302  * Disable the DPLL used by @crtc_state.
303  */
304 void intel_dpll_disable(const struct intel_crtc_state *crtc_state)
305 {
306 	struct intel_display *display = to_intel_display(crtc_state);
307 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
308 	struct intel_dpll *pll = crtc_state->intel_dpll;
309 	unsigned int pipe_mask = intel_crtc_joined_pipe_mask(crtc_state);
310 
311 	/* PCH only available on ILK+ */
312 	if (DISPLAY_VER(display) < 5)
313 		return;
314 
315 	if (pll == NULL)
316 		return;
317 
318 	mutex_lock(&display->dpll.lock);
319 	if (drm_WARN(display->drm, !(pll->active_mask & pipe_mask),
320 		     "%s not used by [CRTC:%d:%s]\n", pll->info->name,
321 		     crtc->base.base.id, crtc->base.name))
322 		goto out;
323 
324 	drm_dbg_kms(display->drm,
325 		    "disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
326 		    pll->info->name, pll->active_mask, pll->on,
327 		    crtc->base.base.id, crtc->base.name);
328 
329 	assert_dpll_enabled(display, pll);
330 	drm_WARN_ON(display->drm, !pll->on);
331 
332 	pll->active_mask &= ~pipe_mask;
333 	if (pll->active_mask)
334 		goto out;
335 
336 	drm_dbg_kms(display->drm, "disabling %s\n", pll->info->name);
337 
338 	_intel_disable_shared_dpll(display, pll);
339 
340 out:
341 	mutex_unlock(&display->dpll.lock);
342 }
343 
344 static unsigned long
345 intel_dpll_mask_all(struct intel_display *display)
346 {
347 	struct intel_dpll *pll;
348 	unsigned long dpll_mask = 0;
349 	int i;
350 
351 	for_each_dpll(display, pll, i) {
352 		drm_WARN_ON(display->drm, dpll_mask & BIT(pll->info->id));
353 
354 		dpll_mask |= BIT(pll->info->id);
355 	}
356 
357 	return dpll_mask;
358 }
359 
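/*
 * Pick a PLL from @dpll_mask: prefer one whose staged state already matches
 * @dpll_hw_state so it can be shared, otherwise fall back to the first
 * currently unused one.
 */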
360 static struct intel_dpll *
361 intel_find_dpll(struct intel_atomic_state *state,
362 		const struct intel_crtc *crtc,
363 		const struct intel_dpll_hw_state *dpll_hw_state,
364 		unsigned long dpll_mask)
365 {
366 	struct intel_display *display = to_intel_display(crtc);
367 	unsigned long dpll_mask_all = intel_dpll_mask_all(display);
368 	struct intel_dpll_state *dpll_state;
369 	struct intel_dpll *unused_pll = NULL;
370 	enum intel_dpll_id id;
371 
372 	dpll_state = intel_atomic_get_dpll_state(&state->base);
373 
374 	drm_WARN_ON(display->drm, dpll_mask & ~dpll_mask_all);
375 
376 	for_each_set_bit(id, &dpll_mask, fls(dpll_mask_all)) {
377 		struct intel_dpll *pll;
378 
379 		pll = intel_get_dpll_by_id(display, id);
380 		if (!pll)
381 			continue;
382 
383 		/* Only want to check enabled timings first */
384 		if (dpll_state[pll->index].pipe_mask == 0) {
385 			if (!unused_pll)
386 				unused_pll = pll;
387 			continue;
388 		}
389 
390 		if (memcmp(dpll_hw_state,
391 			   &dpll_state[pll->index].hw_state,
392 			   sizeof(*dpll_hw_state)) == 0) {
393 			drm_dbg_kms(display->drm,
394 				    "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
395 				    crtc->base.base.id, crtc->base.name,
396 				    pll->info->name,
397 				    dpll_state[pll->index].pipe_mask,
398 				    pll->active_mask);
399 			return pll;
400 		}
401 	}
402 
403 	/* Ok no matching timings, maybe there's a free one? */
404 	if (unused_pll) {
405 		drm_dbg_kms(display->drm, "[CRTC:%d:%s] allocated %s\n",
406 			    crtc->base.base.id, crtc->base.name,
407 			    unused_pll->info->name);
408 		return unused_pll;
409 	}
410 
411 	return NULL;
412 }
413 
414 /**
415  * intel_dpll_crtc_get - Get a DPLL reference for a CRTC
416  * @crtc: CRTC on which behalf the reference is taken
417  * @pll: DPLL for which the reference is taken
418  * @dpll_state: the DPLL atomic state in which the reference is tracked
419  *
420  * Take a reference for @pll tracking the use of it by @crtc.
421  */
422 static void
423 intel_dpll_crtc_get(const struct intel_crtc *crtc,
424 		    const struct intel_dpll *pll,
425 		    struct intel_dpll_state *dpll_state)
426 {
427 	struct intel_display *display = to_intel_display(crtc);
428 
429 	drm_WARN_ON(display->drm, (dpll_state->pipe_mask & BIT(crtc->pipe)) != 0);
430 
431 	dpll_state->pipe_mask |= BIT(crtc->pipe);
432 
433 	drm_dbg_kms(display->drm, "[CRTC:%d:%s] reserving %s\n",
434 		    crtc->base.base.id, crtc->base.name, pll->info->name);
435 }
436 
437 static void
438 intel_reference_dpll(struct intel_atomic_state *state,
439 		     const struct intel_crtc *crtc,
440 		     const struct intel_dpll *pll,
441 		     const struct intel_dpll_hw_state *dpll_hw_state)
442 {
443 	struct intel_dpll_state *dpll_state;
444 
445 	dpll_state = intel_atomic_get_dpll_state(&state->base);
446 
447 	if (dpll_state[pll->index].pipe_mask == 0)
448 		dpll_state[pll->index].hw_state = *dpll_hw_state;
449 
450 	intel_dpll_crtc_get(crtc, pll, &dpll_state[pll->index]);
451 }
452 
453 /**
454  * intel_dpll_crtc_put - Drop a DPLL reference for a CRTC
455  * @crtc: CRTC on which behalf the reference is dropped
456  * @pll: DPLL for which the reference is dropped
457  * @dpll_state: the DPLL atomic state in which the reference is tracked
458  *
459  * Drop a reference for @pll tracking the end of use of it by @crtc.
460  */
461 void
462 intel_dpll_crtc_put(const struct intel_crtc *crtc,
463 		    const struct intel_dpll *pll,
464 		    struct intel_dpll_state *dpll_state)
465 {
466 	struct intel_display *display = to_intel_display(crtc);
467 
468 	drm_WARN_ON(display->drm, (dpll_state->pipe_mask & BIT(crtc->pipe)) == 0);
469 
470 	dpll_state->pipe_mask &= ~BIT(crtc->pipe);
471 
472 	drm_dbg_kms(display->drm, "[CRTC:%d:%s] releasing %s\n",
473 		    crtc->base.base.id, crtc->base.name, pll->info->name);
474 }
475 
476 static void intel_unreference_dpll(struct intel_atomic_state *state,
477 				   const struct intel_crtc *crtc,
478 				   const struct intel_dpll *pll)
479 {
480 	struct intel_dpll_state *dpll_state;
481 
482 	dpll_state = intel_atomic_get_dpll_state(&state->base);
483 
484 	intel_dpll_crtc_put(crtc, pll, &dpll_state[pll->index]);
485 }
486 
487 static void intel_put_dpll(struct intel_atomic_state *state,
488 			   struct intel_crtc *crtc)
489 {
490 	const struct intel_crtc_state *old_crtc_state =
491 		intel_atomic_get_old_crtc_state(state, crtc);
492 	struct intel_crtc_state *new_crtc_state =
493 		intel_atomic_get_new_crtc_state(state, crtc);
494 
495 	new_crtc_state->intel_dpll = NULL;
496 
497 	if (!old_crtc_state->intel_dpll)
498 		return;
499 
500 	intel_unreference_dpll(state, crtc, old_crtc_state->intel_dpll);
501 }
502 
503 /**
504  * intel_dpll_swap_state - make atomic DPLL configuration effective
505  * @state: atomic state
506  *
507  * This is the dpll version of drm_atomic_helper_swap_state() since the
508  * helper does not handle driver-specific global state.
509  *
510  * For consistency with atomic helpers this function does a complete swap,
511  * i.e. it also puts the current state into @state, even though there is no
512  * need for that at this moment.
513  */
514 void intel_dpll_swap_state(struct intel_atomic_state *state)
515 {
516 	struct intel_display *display = to_intel_display(state);
517 	struct intel_dpll_state *dpll_state = state->dpll_state;
518 	struct intel_dpll *pll;
519 	int i;
520 
521 	if (!state->dpll_set)
522 		return;
523 
524 	for_each_dpll(display, pll, i)
525 		swap(pll->state, dpll_state[pll->index]);
526 }
527 
528 static bool ibx_pch_dpll_get_hw_state(struct intel_display *display,
529 				      struct intel_dpll *pll,
530 				      struct intel_dpll_hw_state *dpll_hw_state)
531 {
532 	struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
533 	const enum intel_dpll_id id = pll->info->id;
534 	intel_wakeref_t wakeref;
535 	u32 val;
536 
537 	wakeref = intel_display_power_get_if_enabled(display,
538 						     POWER_DOMAIN_DISPLAY_CORE);
539 	if (!wakeref)
540 		return false;
541 
542 	val = intel_de_read(display, PCH_DPLL(id));
543 	hw_state->dpll = val;
544 	hw_state->fp0 = intel_de_read(display, PCH_FP0(id));
545 	hw_state->fp1 = intel_de_read(display, PCH_FP1(id));
546 
547 	intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);
548 
549 	return val & DPLL_VCO_ENABLE;
550 }
551 
552 static void ibx_assert_pch_refclk_enabled(struct intel_display *display)
553 {
554 	u32 val;
555 	bool enabled;
556 
557 	val = intel_de_read(display, PCH_DREF_CONTROL);
558 	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
559 			    DREF_SUPERSPREAD_SOURCE_MASK));
560 	INTEL_DISPLAY_STATE_WARN(display, !enabled,
561 				 "PCH refclk assertion failure, should be active but is disabled\n");
562 }
563 
564 static void ibx_pch_dpll_enable(struct intel_display *display,
565 				struct intel_dpll *pll,
566 				const struct intel_dpll_hw_state *dpll_hw_state)
567 {
568 	const struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
569 	const enum intel_dpll_id id = pll->info->id;
570 
571 	/* PCH refclock must be enabled first */
572 	ibx_assert_pch_refclk_enabled(display);
573 
574 	intel_de_write(display, PCH_FP0(id), hw_state->fp0);
575 	intel_de_write(display, PCH_FP1(id), hw_state->fp1);
576 
577 	intel_de_write(display, PCH_DPLL(id), hw_state->dpll);
578 
579 	/* Wait for the clocks to stabilize. */
580 	intel_de_posting_read(display, PCH_DPLL(id));
581 	udelay(150);
582 
583 	/* The pixel multiplier can only be updated once the
584 	 * DPLL is enabled and the clocks are stable.
585 	 *
586 	 * So write it again.
587 	 */
588 	intel_de_write(display, PCH_DPLL(id), hw_state->dpll);
589 	intel_de_posting_read(display, PCH_DPLL(id));
590 	udelay(200);
591 }
592 
593 static void ibx_pch_dpll_disable(struct intel_display *display,
594 				 struct intel_dpll *pll)
595 {
596 	const enum intel_dpll_id id = pll->info->id;
597 
598 	intel_de_write(display, PCH_DPLL(id), 0);
599 	intel_de_posting_read(display, PCH_DPLL(id));
600 	udelay(200);
601 }
602 
603 static int ibx_compute_dpll(struct intel_atomic_state *state,
604 			    struct intel_crtc *crtc,
605 			    struct intel_encoder *encoder)
606 {
607 	return 0;
608 }
609 
610 static int ibx_get_dpll(struct intel_atomic_state *state,
611 			struct intel_crtc *crtc,
612 			struct intel_encoder *encoder)
613 {
614 	struct intel_display *display = to_intel_display(state);
615 	struct intel_crtc_state *crtc_state =
616 		intel_atomic_get_new_crtc_state(state, crtc);
617 	struct intel_dpll *pll;
618 	enum intel_dpll_id id;
619 
620 	if (HAS_PCH_IBX(display)) {
621 		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
622 		id = (enum intel_dpll_id) crtc->pipe;
623 		pll = intel_get_dpll_by_id(display, id);
624 
625 		drm_dbg_kms(display->drm,
626 			    "[CRTC:%d:%s] using pre-allocated %s\n",
627 			    crtc->base.base.id, crtc->base.name,
628 			    pll->info->name);
629 	} else {
630 		pll = intel_find_dpll(state, crtc,
631 				      &crtc_state->dpll_hw_state,
632 				      BIT(DPLL_ID_PCH_PLL_B) |
633 				      BIT(DPLL_ID_PCH_PLL_A));
634 	}
635 
636 	if (!pll)
637 		return -EINVAL;
638 
639 	/* reference the pll */
640 	intel_reference_dpll(state, crtc,
641 			     pll, &crtc_state->dpll_hw_state);
642 
643 	crtc_state->intel_dpll = pll;
644 
645 	return 0;
646 }
647 
648 static void ibx_dump_hw_state(struct drm_printer *p,
649 			      const struct intel_dpll_hw_state *dpll_hw_state)
650 {
651 	const struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
652 
653 	drm_printf(p, "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
654 		   "fp0: 0x%x, fp1: 0x%x\n",
655 		   hw_state->dpll,
656 		   hw_state->dpll_md,
657 		   hw_state->fp0,
658 		   hw_state->fp1);
659 }
660 
661 static bool ibx_compare_hw_state(const struct intel_dpll_hw_state *_a,
662 				 const struct intel_dpll_hw_state *_b)
663 {
664 	const struct i9xx_dpll_hw_state *a = &_a->i9xx;
665 	const struct i9xx_dpll_hw_state *b = &_b->i9xx;
666 
667 	return a->dpll == b->dpll &&
668 		a->dpll_md == b->dpll_md &&
669 		a->fp0 == b->fp0 &&
670 		a->fp1 == b->fp1;
671 }
672 
673 static const struct intel_dpll_funcs ibx_pch_dpll_funcs = {
674 	.enable = ibx_pch_dpll_enable,
675 	.disable = ibx_pch_dpll_disable,
676 	.get_hw_state = ibx_pch_dpll_get_hw_state,
677 };
678 
679 static const struct dpll_info pch_plls[] = {
680 	{ .name = "PCH DPLL A", .funcs = &ibx_pch_dpll_funcs, .id = DPLL_ID_PCH_PLL_A, },
681 	{ .name = "PCH DPLL B", .funcs = &ibx_pch_dpll_funcs, .id = DPLL_ID_PCH_PLL_B, },
682 	{}
683 };
684 
685 static const struct intel_dpll_mgr pch_pll_mgr = {
686 	.dpll_info = pch_plls,
687 	.compute_dplls = ibx_compute_dpll,
688 	.get_dplls = ibx_get_dpll,
689 	.put_dplls = intel_put_dpll,
690 	.dump_hw_state = ibx_dump_hw_state,
691 	.compare_hw_state = ibx_compare_hw_state,
692 };
693 
694 static void hsw_ddi_wrpll_enable(struct intel_display *display,
695 				 struct intel_dpll *pll,
696 				 const struct intel_dpll_hw_state *dpll_hw_state)
697 {
698 	const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
699 	const enum intel_dpll_id id = pll->info->id;
700 
701 	intel_de_write(display, WRPLL_CTL(id), hw_state->wrpll);
702 	intel_de_posting_read(display, WRPLL_CTL(id));
703 	udelay(20);
704 }
705 
706 static void hsw_ddi_spll_enable(struct intel_display *display,
707 				struct intel_dpll *pll,
708 				const struct intel_dpll_hw_state *dpll_hw_state)
709 {
710 	const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
711 
712 	intel_de_write(display, SPLL_CTL, hw_state->spll);
713 	intel_de_posting_read(display, SPLL_CTL);
714 	udelay(20);
715 }
716 
717 static void hsw_ddi_wrpll_disable(struct intel_display *display,
718 				  struct intel_dpll *pll)
719 {
720 	const enum intel_dpll_id id = pll->info->id;
721 
722 	intel_de_rmw(display, WRPLL_CTL(id), WRPLL_PLL_ENABLE, 0);
723 	intel_de_posting_read(display, WRPLL_CTL(id));
724 
725 	/*
726 	 * Try to set up the PCH reference clock once all DPLLs
727 	 * that depend on it have been shut down.
728 	 */
729 	if (display->dpll.pch_ssc_use & BIT(id))
730 		intel_init_pch_refclk(display);
731 }
732 
733 static void hsw_ddi_spll_disable(struct intel_display *display,
734 				 struct intel_dpll *pll)
735 {
736 	enum intel_dpll_id id = pll->info->id;
737 
738 	intel_de_rmw(display, SPLL_CTL, SPLL_PLL_ENABLE, 0);
739 	intel_de_posting_read(display, SPLL_CTL);
740 
741 	/*
742 	 * Try to set up the PCH reference clock once all DPLLs
743 	 * that depend on it have been shut down.
744 	 */
745 	if (display->dpll.pch_ssc_use & BIT(id))
746 		intel_init_pch_refclk(display);
747 }
748 
749 static bool hsw_ddi_wrpll_get_hw_state(struct intel_display *display,
750 				       struct intel_dpll *pll,
751 				       struct intel_dpll_hw_state *dpll_hw_state)
752 {
753 	struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
754 	const enum intel_dpll_id id = pll->info->id;
755 	intel_wakeref_t wakeref;
756 	u32 val;
757 
758 	wakeref = intel_display_power_get_if_enabled(display,
759 						     POWER_DOMAIN_DISPLAY_CORE);
760 	if (!wakeref)
761 		return false;
762 
763 	val = intel_de_read(display, WRPLL_CTL(id));
764 	hw_state->wrpll = val;
765 
766 	intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);
767 
768 	return val & WRPLL_PLL_ENABLE;
769 }
770 
771 static bool hsw_ddi_spll_get_hw_state(struct intel_display *display,
772 				      struct intel_dpll *pll,
773 				      struct intel_dpll_hw_state *dpll_hw_state)
774 {
775 	struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
776 	intel_wakeref_t wakeref;
777 	u32 val;
778 
779 	wakeref = intel_display_power_get_if_enabled(display,
780 						     POWER_DOMAIN_DISPLAY_CORE);
781 	if (!wakeref)
782 		return false;
783 
784 	val = intel_de_read(display, SPLL_CTL);
785 	hw_state->spll = val;
786 
787 	intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);
788 
789 	return val & SPLL_PLL_ENABLE;
790 }
791 
792 #define LC_FREQ 2700
793 #define LC_FREQ_2K U64_C(LC_FREQ * 2000)
794 
795 #define P_MIN 2
796 #define P_MAX 64
797 #define P_INC 2
798 
799 /* Constraints for PLL good behavior */
800 #define REF_MIN 48
801 #define REF_MAX 400
802 #define VCO_MIN 2400
803 #define VCO_MAX 4800
804 
805 struct hsw_wrpll_rnp {
806 	unsigned p, n2, r2;
807 };
808 
809 static unsigned hsw_wrpll_get_budget_for_freq(int clock)
810 {
811 	switch (clock) {
812 	case 25175000:
813 	case 25200000:
814 	case 27000000:
815 	case 27027000:
816 	case 37762500:
817 	case 37800000:
818 	case 40500000:
819 	case 40541000:
820 	case 54000000:
821 	case 54054000:
822 	case 59341000:
823 	case 59400000:
824 	case 72000000:
825 	case 74176000:
826 	case 74250000:
827 	case 81000000:
828 	case 81081000:
829 	case 89012000:
830 	case 89100000:
831 	case 108000000:
832 	case 108108000:
833 	case 111264000:
834 	case 111375000:
835 	case 148352000:
836 	case 148500000:
837 	case 162000000:
838 	case 162162000:
839 	case 222525000:
840 	case 222750000:
841 	case 296703000:
842 	case 297000000:
843 		return 0;
844 	case 233500000:
845 	case 245250000:
846 	case 247750000:
847 	case 253250000:
848 	case 298000000:
849 		return 1500;
850 	case 169128000:
851 	case 169500000:
852 	case 179500000:
853 	case 202000000:
854 		return 2000;
855 	case 256250000:
856 	case 262500000:
857 	case 270000000:
858 	case 272500000:
859 	case 273750000:
860 	case 280750000:
861 	case 281250000:
862 	case 286000000:
863 	case 291750000:
864 		return 4000;
865 	case 267250000:
866 	case 268500000:
867 		return 5000;
868 	default:
869 		return 1000;
870 	}
871 }
872 
873 static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
874 				 unsigned int r2, unsigned int n2,
875 				 unsigned int p,
876 				 struct hsw_wrpll_rnp *best)
877 {
878 	u64 a, b, c, d, diff, diff_best;
879 
880 	/* No best (r,n,p) yet */
881 	if (best->p == 0) {
882 		best->p = p;
883 		best->n2 = n2;
884 		best->r2 = r2;
885 		return;
886 	}
887 
888 	/*
889 	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
890 	 * freq2k.
891 	 *
892 	 * delta = 1e6 *
893 	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
894 	 *	   freq2k;
895 	 *
896 	 * and we would like delta <= budget.
897 	 *
898 	 * If the discrepancy is above the PPM-based budget, always prefer to
899 	 * improve upon the previous solution.  However, if you're within the
900 	 * budget, try to maximize Ref * VCO, that is N / R^2 (n2 / (r2 * r2) below).
901 	 */
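	/*
	 * To stay in integer math, "delta <= budget" is checked by cross
	 * multiplying: with diff = |freq2k * p * r2 - LC_FREQ_2K * n2|,
	 * delta <= budget  <=>  1e6 * diff <= budget * freq2k * p * r2,
	 * i.e. c <= a below (and d <= b for the previous best).
	 */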
902 	a = freq2k * budget * p * r2;
903 	b = freq2k * budget * best->p * best->r2;
904 	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
905 	diff_best = abs_diff(freq2k * best->p * best->r2,
906 			     LC_FREQ_2K * best->n2);
907 	c = 1000000 * diff;
908 	d = 1000000 * diff_best;
909 
910 	if (a < c && b < d) {
911 		/* If both are above the budget, pick the closer */
912 		if (best->p * best->r2 * diff < p * r2 * diff_best) {
913 			best->p = p;
914 			best->n2 = n2;
915 			best->r2 = r2;
916 		}
917 	} else if (a >= c && b < d) {
918 		/* The new candidate is within budget but the previous best is not: update */
919 		best->p = p;
920 		best->n2 = n2;
921 		best->r2 = r2;
922 	} else if (a >= c && b >= d) {
923 		/* Both are below the limit, so pick the higher n2/(r2*r2) */
924 		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
925 			best->p = p;
926 			best->n2 = n2;
927 			best->r2 = r2;
928 		}
929 	}
930 	/* Otherwise a < c && b >= d, do nothing */
931 }
932 
933 static void
934 hsw_ddi_calculate_wrpll(int clock /* in Hz */,
935 			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
936 {
937 	u64 freq2k;
938 	unsigned p, n2, r2;
939 	struct hsw_wrpll_rnp best = {};
940 	unsigned budget;
941 
942 	freq2k = clock / 100;
943 
944 	budget = hsw_wrpll_get_budget_for_freq(clock);
945 
946 	/* Special case handling for the 540 MHz pixel clock: bypass the WR PLL
947 	 * entirely and directly pass the LC PLL to it. */
948 	if (freq2k == 5400000) {
949 		*n2_out = 2;
950 		*p_out = 1;
951 		*r2_out = 2;
952 		return;
953 	}
954 
955 	/*
956 	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
957 	 * the WR PLL.
958 	 *
959 	 * We want R so that REF_MIN <= Ref <= REF_MAX.
960 	 * Injecting R2 = 2 * R gives:
961 	 *   REF_MAX * r2 > LC_FREQ * 2 and
962 	 *   REF_MIN * r2 < LC_FREQ * 2
963 	 *
964 	 * Which means the desired boundaries for r2 are:
965 	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
966 	 *
967 	 */
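	/* e.g. LC_FREQ = 2700, REF_MIN/REF_MAX = 48/400  =>  r2 runs from 14 to 112 */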
968 	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
969 	     r2 <= LC_FREQ * 2 / REF_MIN;
970 	     r2++) {
971 
972 		/*
973 		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
974 		 *
975 		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
976 		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
977 		 *   VCO_MAX * r2 > n2 * LC_FREQ and
978 		 *   VCO_MIN * r2 < n2 * LC_FREQ
979 		 *
980 		 * Which means the desired boundaries for n2 are:
981 		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
982 		 */
983 		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
984 		     n2 <= VCO_MAX * r2 / LC_FREQ;
985 		     n2++) {
986 
987 			for (p = P_MIN; p <= P_MAX; p += P_INC)
988 				hsw_wrpll_update_rnp(freq2k, budget,
989 						     r2, n2, p, &best);
990 		}
991 	}
992 
993 	*n2_out = best.n2;
994 	*p_out = best.p;
995 	*r2_out = best.r2;
996 }
997 
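/*
 * Readout counterpart of hsw_ddi_calculate_wrpll(): the divider fields hold
 * r2 = 2 * R and n2 = 2 * N, so e.g. with the 2700 MHz LC PLL reference the
 * expression below works out to 540000 * n / (p * r) kHz.
 */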
998 static int hsw_ddi_wrpll_get_freq(struct intel_display *display,
999 				  const struct intel_dpll *pll,
1000 				  const struct intel_dpll_hw_state *dpll_hw_state)
1001 {
1002 	const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
1003 	int refclk;
1004 	int n, p, r;
1005 	u32 wrpll = hw_state->wrpll;
1006 
1007 	switch (wrpll & WRPLL_REF_MASK) {
1008 	case WRPLL_REF_SPECIAL_HSW:
1009 		/* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
1010 		if (display->platform.haswell && !display->platform.haswell_ult) {
1011 			refclk = display->dpll.ref_clks.nssc;
1012 			break;
1013 		}
1014 		fallthrough;
1015 	case WRPLL_REF_PCH_SSC:
1016 		/*
1017 		 * We could calculate spread here, but our checking
1018 		 * code only cares about 5% accuracy, and spread is a max of
1019 		 * 0.5% downspread.
1020 		 */
1021 		refclk = display->dpll.ref_clks.ssc;
1022 		break;
1023 	case WRPLL_REF_LCPLL:
1024 		refclk = 2700000;
1025 		break;
1026 	default:
1027 		MISSING_CASE(wrpll);
1028 		return 0;
1029 	}
1030 
1031 	r = wrpll & WRPLL_DIVIDER_REF_MASK;
1032 	p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
1033 	n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;
1034 
1035 	/* Convert to KHz, p & r have a fixed point portion */
1036 	return (refclk * n / 10) / (p * r) * 2;
1037 }
1038 
1039 static int
1040 hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state,
1041 			   struct intel_crtc *crtc)
1042 {
1043 	struct intel_display *display = to_intel_display(state);
1044 	struct intel_crtc_state *crtc_state =
1045 		intel_atomic_get_new_crtc_state(state, crtc);
1046 	struct hsw_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.hsw;
1047 	unsigned int p, n2, r2;
1048 
1049 	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
1050 
1051 	hw_state->wrpll =
1052 		WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
1053 		WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
1054 		WRPLL_DIVIDER_POST(p);
1055 
1056 	crtc_state->port_clock = hsw_ddi_wrpll_get_freq(display, NULL,
1057 							&crtc_state->dpll_hw_state);
1058 
1059 	return 0;
1060 }
1061 
1062 static struct intel_dpll *
1063 hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
1064 		       struct intel_crtc *crtc)
1065 {
1066 	struct intel_crtc_state *crtc_state =
1067 		intel_atomic_get_new_crtc_state(state, crtc);
1068 
1069 	return intel_find_dpll(state, crtc,
1070 				      &crtc_state->dpll_hw_state,
1071 				      BIT(DPLL_ID_WRPLL2) |
1072 				      BIT(DPLL_ID_WRPLL1));
1073 }
1074 
1075 static int
1076 hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state)
1077 {
1078 	struct intel_display *display = to_intel_display(crtc_state);
1079 	int clock = crtc_state->port_clock;
1080 
1081 	switch (clock / 2) {
1082 	case 81000:
1083 	case 135000:
1084 	case 270000:
1085 		return 0;
1086 	default:
1087 		drm_dbg_kms(display->drm, "Invalid clock for DP: %d\n",
1088 			    clock);
1089 		return -EINVAL;
1090 	}
1091 }
1092 
1093 static struct intel_dpll *
1094 hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
1095 {
1096 	struct intel_display *display = to_intel_display(crtc_state);
1097 	struct intel_dpll *pll;
1098 	enum intel_dpll_id pll_id;
1099 	int clock = crtc_state->port_clock;
1100 
1101 	switch (clock / 2) {
1102 	case 81000:
1103 		pll_id = DPLL_ID_LCPLL_810;
1104 		break;
1105 	case 135000:
1106 		pll_id = DPLL_ID_LCPLL_1350;
1107 		break;
1108 	case 270000:
1109 		pll_id = DPLL_ID_LCPLL_2700;
1110 		break;
1111 	default:
1112 		MISSING_CASE(clock / 2);
1113 		return NULL;
1114 	}
1115 
1116 	pll = intel_get_dpll_by_id(display, pll_id);
1117 
1118 	if (!pll)
1119 		return NULL;
1120 
1121 	return pll;
1122 }
1123 
1124 static int hsw_ddi_lcpll_get_freq(struct intel_display *display,
1125 				  const struct intel_dpll *pll,
1126 				  const struct intel_dpll_hw_state *dpll_hw_state)
1127 {
1128 	int link_clock = 0;
1129 
1130 	switch (pll->info->id) {
1131 	case DPLL_ID_LCPLL_810:
1132 		link_clock = 81000;
1133 		break;
1134 	case DPLL_ID_LCPLL_1350:
1135 		link_clock = 135000;
1136 		break;
1137 	case DPLL_ID_LCPLL_2700:
1138 		link_clock = 270000;
1139 		break;
1140 	default:
1141 		drm_WARN(display->drm, 1, "bad port clock sel\n");
1142 		break;
1143 	}
1144 
1145 	return link_clock * 2;
1146 }
1147 
1148 static int
1149 hsw_ddi_spll_compute_dpll(struct intel_atomic_state *state,
1150 			  struct intel_crtc *crtc)
1151 {
1152 	struct intel_crtc_state *crtc_state =
1153 		intel_atomic_get_new_crtc_state(state, crtc);
1154 	struct hsw_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.hsw;
1155 
1156 	if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
1157 		return -EINVAL;
1158 
1159 	hw_state->spll =
1160 		SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
1161 
1162 	return 0;
1163 }
1164 
1165 static struct intel_dpll *
1166 hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
1167 		      struct intel_crtc *crtc)
1168 {
1169 	struct intel_crtc_state *crtc_state =
1170 		intel_atomic_get_new_crtc_state(state, crtc);
1171 
1172 	return intel_find_dpll(state, crtc, &crtc_state->dpll_hw_state,
1173 				      BIT(DPLL_ID_SPLL));
1174 }
1175 
1176 static int hsw_ddi_spll_get_freq(struct intel_display *display,
1177 				 const struct intel_dpll *pll,
1178 				 const struct intel_dpll_hw_state *dpll_hw_state)
1179 {
1180 	const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
1181 	int link_clock = 0;
1182 
1183 	switch (hw_state->spll & SPLL_FREQ_MASK) {
1184 	case SPLL_FREQ_810MHz:
1185 		link_clock = 81000;
1186 		break;
1187 	case SPLL_FREQ_1350MHz:
1188 		link_clock = 135000;
1189 		break;
1190 	case SPLL_FREQ_2700MHz:
1191 		link_clock = 270000;
1192 		break;
1193 	default:
1194 		drm_WARN(display->drm, 1, "bad spll freq\n");
1195 		break;
1196 	}
1197 
1198 	return link_clock * 2;
1199 }
1200 
1201 static int hsw_compute_dpll(struct intel_atomic_state *state,
1202 			    struct intel_crtc *crtc,
1203 			    struct intel_encoder *encoder)
1204 {
1205 	struct intel_crtc_state *crtc_state =
1206 		intel_atomic_get_new_crtc_state(state, crtc);
1207 
1208 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1209 		return hsw_ddi_wrpll_compute_dpll(state, crtc);
1210 	else if (intel_crtc_has_dp_encoder(crtc_state))
1211 		return hsw_ddi_lcpll_compute_dpll(crtc_state);
1212 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1213 		return hsw_ddi_spll_compute_dpll(state, crtc);
1214 	else
1215 		return -EINVAL;
1216 }
1217 
1218 static int hsw_get_dpll(struct intel_atomic_state *state,
1219 			struct intel_crtc *crtc,
1220 			struct intel_encoder *encoder)
1221 {
1222 	struct intel_crtc_state *crtc_state =
1223 		intel_atomic_get_new_crtc_state(state, crtc);
1224 	struct intel_dpll *pll = NULL;
1225 
1226 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1227 		pll = hsw_ddi_wrpll_get_dpll(state, crtc);
1228 	else if (intel_crtc_has_dp_encoder(crtc_state))
1229 		pll = hsw_ddi_lcpll_get_dpll(crtc_state);
1230 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1231 		pll = hsw_ddi_spll_get_dpll(state, crtc);
1232 
1233 	if (!pll)
1234 		return -EINVAL;
1235 
1236 	intel_reference_dpll(state, crtc,
1237 			     pll, &crtc_state->dpll_hw_state);
1238 
1239 	crtc_state->intel_dpll = pll;
1240 
1241 	return 0;
1242 }
1243 
1244 static void hsw_update_dpll_ref_clks(struct intel_display *display)
1245 {
1246 	display->dpll.ref_clks.ssc = 135000;
1247 	/* Non-SSC is only used on non-ULT HSW. */
1248 	if (intel_de_read(display, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
1249 		display->dpll.ref_clks.nssc = 24000;
1250 	else
1251 		display->dpll.ref_clks.nssc = 135000;
1252 }
1253 
1254 static void hsw_dump_hw_state(struct drm_printer *p,
1255 			      const struct intel_dpll_hw_state *dpll_hw_state)
1256 {
1257 	const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
1258 
1259 	drm_printf(p, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
1260 		   hw_state->wrpll, hw_state->spll);
1261 }
1262 
1263 static bool hsw_compare_hw_state(const struct intel_dpll_hw_state *_a,
1264 				 const struct intel_dpll_hw_state *_b)
1265 {
1266 	const struct hsw_dpll_hw_state *a = &_a->hsw;
1267 	const struct hsw_dpll_hw_state *b = &_b->hsw;
1268 
1269 	return a->wrpll == b->wrpll &&
1270 		a->spll == b->spll;
1271 }
1272 
1273 static const struct intel_dpll_funcs hsw_ddi_wrpll_funcs = {
1274 	.enable = hsw_ddi_wrpll_enable,
1275 	.disable = hsw_ddi_wrpll_disable,
1276 	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
1277 	.get_freq = hsw_ddi_wrpll_get_freq,
1278 };
1279 
1280 static const struct intel_dpll_funcs hsw_ddi_spll_funcs = {
1281 	.enable = hsw_ddi_spll_enable,
1282 	.disable = hsw_ddi_spll_disable,
1283 	.get_hw_state = hsw_ddi_spll_get_hw_state,
1284 	.get_freq = hsw_ddi_spll_get_freq,
1285 };
1286 
1287 static void hsw_ddi_lcpll_enable(struct intel_display *display,
1288 				 struct intel_dpll *pll,
1289 				 const struct intel_dpll_hw_state *hw_state)
1290 {
1291 }
1292 
1293 static void hsw_ddi_lcpll_disable(struct intel_display *display,
1294 				  struct intel_dpll *pll)
1295 {
1296 }
1297 
1298 static bool hsw_ddi_lcpll_get_hw_state(struct intel_display *display,
1299 				       struct intel_dpll *pll,
1300 				       struct intel_dpll_hw_state *dpll_hw_state)
1301 {
1302 	return true;
1303 }
1304 
1305 static const struct intel_dpll_funcs hsw_ddi_lcpll_funcs = {
1306 	.enable = hsw_ddi_lcpll_enable,
1307 	.disable = hsw_ddi_lcpll_disable,
1308 	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
1309 	.get_freq = hsw_ddi_lcpll_get_freq,
1310 };
1311 
1312 static const struct dpll_info hsw_plls[] = {
1313 	{ .name = "WRPLL 1", .funcs = &hsw_ddi_wrpll_funcs, .id = DPLL_ID_WRPLL1, },
1314 	{ .name = "WRPLL 2", .funcs = &hsw_ddi_wrpll_funcs, .id = DPLL_ID_WRPLL2, },
1315 	{ .name = "SPLL", .funcs = &hsw_ddi_spll_funcs, .id = DPLL_ID_SPLL, },
1316 	{ .name = "LCPLL 810", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_810,
1317 	  .always_on = true, },
1318 	{ .name = "LCPLL 1350", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_1350,
1319 	  .always_on = true, },
1320 	{ .name = "LCPLL 2700", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_2700,
1321 	  .always_on = true, },
1322 	{}
1323 };
1324 
1325 static const struct intel_dpll_mgr hsw_pll_mgr = {
1326 	.dpll_info = hsw_plls,
1327 	.compute_dplls = hsw_compute_dpll,
1328 	.get_dplls = hsw_get_dpll,
1329 	.put_dplls = intel_put_dpll,
1330 	.update_ref_clks = hsw_update_dpll_ref_clks,
1331 	.dump_hw_state = hsw_dump_hw_state,
1332 	.compare_hw_state = hsw_compare_hw_state,
1333 };
1334 
1335 struct skl_dpll_regs {
1336 	i915_reg_t ctl, cfgcr1, cfgcr2;
1337 };
1338 
1339 /* this array is indexed by the *shared* pll id */
1340 static const struct skl_dpll_regs skl_dpll_regs[4] = {
1341 	{
1342 		/* DPLL 0 */
1343 		.ctl = LCPLL1_CTL,
1344 		/* DPLL 0 doesn't support HDMI mode */
1345 	},
1346 	{
1347 		/* DPLL 1 */
1348 		.ctl = LCPLL2_CTL,
1349 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
1350 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
1351 	},
1352 	{
1353 		/* DPLL 2 */
1354 		.ctl = WRPLL_CTL(0),
1355 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
1356 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
1357 	},
1358 	{
1359 		/* DPLL 3 */
1360 		.ctl = WRPLL_CTL(1),
1361 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
1362 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
1363 	},
1364 };
1365 
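/*
 * DPLL_CTRL1 packs one 6-bit control field per DPLL (override, HDMI mode,
 * SSC and link rate); hw_state->ctrl1 is kept in the DPLL0 position and
 * shifted into the right slot by id * 6 here (and masked back out with
 * 0x3f on readout).
 */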
1366 static void skl_ddi_pll_write_ctrl1(struct intel_display *display,
1367 				    struct intel_dpll *pll,
1368 				    const struct skl_dpll_hw_state *hw_state)
1369 {
1370 	const enum intel_dpll_id id = pll->info->id;
1371 
1372 	intel_de_rmw(display, DPLL_CTRL1,
1373 		     DPLL_CTRL1_HDMI_MODE(id) |
1374 		     DPLL_CTRL1_SSC(id) |
1375 		     DPLL_CTRL1_LINK_RATE_MASK(id),
1376 		     hw_state->ctrl1 << (id * 6));
1377 	intel_de_posting_read(display, DPLL_CTRL1);
1378 }
1379 
1380 static void skl_ddi_pll_enable(struct intel_display *display,
1381 			       struct intel_dpll *pll,
1382 			       const struct intel_dpll_hw_state *dpll_hw_state)
1383 {
1384 	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1385 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1386 	const enum intel_dpll_id id = pll->info->id;
1387 
1388 	skl_ddi_pll_write_ctrl1(display, pll, hw_state);
1389 
1390 	intel_de_write(display, regs[id].cfgcr1, hw_state->cfgcr1);
1391 	intel_de_write(display, regs[id].cfgcr2, hw_state->cfgcr2);
1392 	intel_de_posting_read(display, regs[id].cfgcr1);
1393 	intel_de_posting_read(display, regs[id].cfgcr2);
1394 
1395 	/* the enable bit is always bit 31 */
1396 	intel_de_rmw(display, regs[id].ctl, 0, LCPLL_PLL_ENABLE);
1397 
1398 	if (intel_de_wait_for_set(display, DPLL_STATUS, DPLL_LOCK(id), 5))
1399 		drm_err(display->drm, "DPLL %d not locked\n", id);
1400 }
1401 
1402 static void skl_ddi_dpll0_enable(struct intel_display *display,
1403 				 struct intel_dpll *pll,
1404 				 const struct intel_dpll_hw_state *dpll_hw_state)
1405 {
1406 	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1407 
1408 	skl_ddi_pll_write_ctrl1(display, pll, hw_state);
1409 }
1410 
1411 static void skl_ddi_pll_disable(struct intel_display *display,
1412 				struct intel_dpll *pll)
1413 {
1414 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1415 	const enum intel_dpll_id id = pll->info->id;
1416 
1417 	/* the enable bit is always bit 31 */
1418 	intel_de_rmw(display, regs[id].ctl, LCPLL_PLL_ENABLE, 0);
1419 	intel_de_posting_read(display, regs[id].ctl);
1420 }
1421 
1422 static void skl_ddi_dpll0_disable(struct intel_display *display,
1423 				  struct intel_dpll *pll)
1424 {
1425 }
1426 
1427 static bool skl_ddi_pll_get_hw_state(struct intel_display *display,
1428 				     struct intel_dpll *pll,
1429 				     struct intel_dpll_hw_state *dpll_hw_state)
1430 {
1431 	struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1432 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1433 	const enum intel_dpll_id id = pll->info->id;
1434 	intel_wakeref_t wakeref;
1435 	bool ret;
1436 	u32 val;
1437 
1438 	wakeref = intel_display_power_get_if_enabled(display,
1439 						     POWER_DOMAIN_DISPLAY_CORE);
1440 	if (!wakeref)
1441 		return false;
1442 
1443 	ret = false;
1444 
1445 	val = intel_de_read(display, regs[id].ctl);
1446 	if (!(val & LCPLL_PLL_ENABLE))
1447 		goto out;
1448 
1449 	val = intel_de_read(display, DPLL_CTRL1);
1450 	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1451 
1452 	/* avoid reading back stale values if HDMI mode is not enabled */
1453 	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
1454 		hw_state->cfgcr1 = intel_de_read(display, regs[id].cfgcr1);
1455 		hw_state->cfgcr2 = intel_de_read(display, regs[id].cfgcr2);
1456 	}
1457 	ret = true;
1458 
1459 out:
1460 	intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1461 
1462 	return ret;
1463 }
1464 
1465 static bool skl_ddi_dpll0_get_hw_state(struct intel_display *display,
1466 				       struct intel_dpll *pll,
1467 				       struct intel_dpll_hw_state *dpll_hw_state)
1468 {
1469 	struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1470 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1471 	const enum intel_dpll_id id = pll->info->id;
1472 	intel_wakeref_t wakeref;
1473 	u32 val;
1474 	bool ret;
1475 
1476 	wakeref = intel_display_power_get_if_enabled(display,
1477 						     POWER_DOMAIN_DISPLAY_CORE);
1478 	if (!wakeref)
1479 		return false;
1480 
1481 	ret = false;
1482 
1483 	/* DPLL0 is always enabled since it drives CDCLK */
1484 	val = intel_de_read(display, regs[id].ctl);
1485 	if (drm_WARN_ON(display->drm, !(val & LCPLL_PLL_ENABLE)))
1486 		goto out;
1487 
1488 	val = intel_de_read(display, DPLL_CTRL1);
1489 	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1490 
1491 	ret = true;
1492 
1493 out:
1494 	intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1495 
1496 	return ret;
1497 }
1498 
1499 struct skl_wrpll_context {
1500 	u64 min_deviation;		/* current minimal deviation */
1501 	u64 central_freq;		/* chosen central freq */
1502 	u64 dco_freq;			/* chosen dco freq */
1503 	unsigned int p;			/* chosen divider */
1504 };
1505 
1506 /* DCO freq must be within +1%/-6%  of the DCO central freq */
1507 #define SKL_DCO_MAX_PDEVIATION	100
1508 #define SKL_DCO_MAX_NDEVIATION	600
1509 
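/*
 * The deviation below is in units of 0.01%: e.g. a 9060 MHz DCO against the
 * 9000 MHz central frequency gives 10000 * 60 / 9000 = 66 (+0.66%), which is
 * within the +1% limit above.
 */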
1510 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1511 				  u64 central_freq,
1512 				  u64 dco_freq,
1513 				  unsigned int divider)
1514 {
1515 	u64 deviation;
1516 
1517 	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1518 			      central_freq);
1519 
1520 	/* positive deviation */
1521 	if (dco_freq >= central_freq) {
1522 		if (deviation < SKL_DCO_MAX_PDEVIATION &&
1523 		    deviation < ctx->min_deviation) {
1524 			ctx->min_deviation = deviation;
1525 			ctx->central_freq = central_freq;
1526 			ctx->dco_freq = dco_freq;
1527 			ctx->p = divider;
1528 		}
1529 	/* negative deviation */
1530 	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1531 		   deviation < ctx->min_deviation) {
1532 		ctx->min_deviation = deviation;
1533 		ctx->central_freq = central_freq;
1534 		ctx->dco_freq = dco_freq;
1535 		ctx->p = divider;
1536 	}
1537 }
1538 
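/*
 * e.g. p = 20: half = 10 and 10 % 2 == 0, so p0/p1/p2 = 2/5/2 (2 * 5 * 2 = 20);
 * the odd divider p = 35 decomposes to 7/1/5.
 */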
1539 static void skl_wrpll_get_multipliers(unsigned int p,
1540 				      unsigned int *p0 /* out */,
1541 				      unsigned int *p1 /* out */,
1542 				      unsigned int *p2 /* out */)
1543 {
1544 	/* even dividers */
1545 	if (p % 2 == 0) {
1546 		unsigned int half = p / 2;
1547 
1548 		if (half == 1 || half == 2 || half == 3 || half == 5) {
1549 			*p0 = 2;
1550 			*p1 = 1;
1551 			*p2 = half;
1552 		} else if (half % 2 == 0) {
1553 			*p0 = 2;
1554 			*p1 = half / 2;
1555 			*p2 = 2;
1556 		} else if (half % 3 == 0) {
1557 			*p0 = 3;
1558 			*p1 = half / 3;
1559 			*p2 = 2;
1560 		} else if (half % 7 == 0) {
1561 			*p0 = 7;
1562 			*p1 = half / 7;
1563 			*p2 = 2;
1564 		}
1565 	} else if (p == 3 || p == 9) {  /* 3, 5, 7, 9, 15, 21, 35 */
1566 		*p0 = 3;
1567 		*p1 = 1;
1568 		*p2 = p / 3;
1569 	} else if (p == 5 || p == 7) {
1570 		*p0 = p;
1571 		*p1 = 1;
1572 		*p2 = 1;
1573 	} else if (p == 15) {
1574 		*p0 = 3;
1575 		*p1 = 1;
1576 		*p2 = 5;
1577 	} else if (p == 21) {
1578 		*p0 = 7;
1579 		*p1 = 1;
1580 		*p2 = 3;
1581 	} else if (p == 35) {
1582 		*p0 = 7;
1583 		*p1 = 1;
1584 		*p2 = 5;
1585 	}
1586 }
1587 
1588 struct skl_wrpll_params {
1589 	u32 dco_fraction;
1590 	u32 dco_integer;
1591 	u32 qdiv_ratio;
1592 	u32 qdiv_mode;
1593 	u32 kdiv;
1594 	u32 pdiv;
1595 	u32 central_freq;
1596 };
1597 
1598 static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
1599 				      u64 afe_clock,
1600 				      int ref_clock,
1601 				      u64 central_freq,
1602 				      u32 p0, u32 p1, u32 p2)
1603 {
1604 	u64 dco_freq;
1605 
1606 	switch (central_freq) {
1607 	case 9600000000ULL:
1608 		params->central_freq = 0;
1609 		break;
1610 	case 9000000000ULL:
1611 		params->central_freq = 1;
1612 		break;
1613 	case 8400000000ULL:
1614 		params->central_freq = 3;
1615 	}
1616 
1617 	switch (p0) {
1618 	case 1:
1619 		params->pdiv = 0;
1620 		break;
1621 	case 2:
1622 		params->pdiv = 1;
1623 		break;
1624 	case 3:
1625 		params->pdiv = 2;
1626 		break;
1627 	case 7:
1628 		params->pdiv = 4;
1629 		break;
1630 	default:
1631 		WARN(1, "Incorrect PDiv\n");
1632 	}
1633 
1634 	switch (p2) {
1635 	case 5:
1636 		params->kdiv = 0;
1637 		break;
1638 	case 2:
1639 		params->kdiv = 1;
1640 		break;
1641 	case 3:
1642 		params->kdiv = 2;
1643 		break;
1644 	case 1:
1645 		params->kdiv = 3;
1646 		break;
1647 	default:
1648 		WARN(1, "Incorrect KDiv\n");
1649 	}
1650 
1651 	params->qdiv_ratio = p1;
1652 	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
1653 
1654 	dco_freq = p0 * p1 * p2 * afe_clock;
1655 
1656 	/*
1657 	 * Intermediate values are in Hz.
1658 	 * Divide by MHz to match bspec
1659 	 */
1660 	params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
1661 	params->dco_fraction =
1662 		div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
1663 			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
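	/*
	 * e.g. dco_freq = 9060 MHz with a 24000 kHz reference:
	 * 9060 / 24 = 377.5, so dco_integer = 377 and
	 * dco_fraction = 0.5 * 0x8000 = 0x4000.
	 */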
1664 }
1665 
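/*
 * Worked example: for a 148.5 MHz pixel clock, afe_clock = 742.5 MHz and the
 * only even divider that puts the DCO within -6%/+1% of a central frequency
 * is p = 12, giving dco_freq = 8910 MHz against the 9000 MHz central
 * (deviation 100, i.e. -1%), which skl_wrpll_get_multipliers() then splits
 * into p0/p1/p2 = 2/3/2.
 */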
1666 static int
1667 skl_ddi_calculate_wrpll(int clock,
1668 			int ref_clock,
1669 			struct skl_wrpll_params *wrpll_params)
1670 {
1671 	static const u64 dco_central_freq[3] = { 8400000000ULL,
1672 						 9000000000ULL,
1673 						 9600000000ULL };
1674 	static const u8 even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
1675 					    24, 28, 30, 32, 36, 40, 42, 44,
1676 					    48, 52, 54, 56, 60, 64, 66, 68,
1677 					    70, 72, 76, 78, 80, 84, 88, 90,
1678 					    92, 96, 98 };
1679 	static const u8 odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
1680 	static const struct {
1681 		const u8 *list;
1682 		int n_dividers;
1683 	} dividers[] = {
1684 		{ even_dividers, ARRAY_SIZE(even_dividers) },
1685 		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
1686 	};
1687 	struct skl_wrpll_context ctx = {
1688 		.min_deviation = U64_MAX,
1689 	};
1690 	unsigned int dco, d, i;
1691 	unsigned int p0, p1, p2;
1692 	u64 afe_clock = (u64)clock * 1000 * 5; /* AFE Clock is 5x Pixel clock, in Hz */
1693 
1694 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
1695 		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
1696 			for (i = 0; i < dividers[d].n_dividers; i++) {
1697 				unsigned int p = dividers[d].list[i];
1698 				u64 dco_freq = p * afe_clock;
1699 
1700 				skl_wrpll_try_divider(&ctx,
1701 						      dco_central_freq[dco],
1702 						      dco_freq,
1703 						      p);
1704 				/*
1705 				 * Skip the remaining dividers if we're sure to
1706 				 * have found the definitive divider; a deviation
1707 				 * of 0 can't be improved upon.
1708 				 */
1709 				if (ctx.min_deviation == 0)
1710 					goto skip_remaining_dividers;
1711 			}
1712 		}
1713 
1714 skip_remaining_dividers:
1715 		/*
1716 		 * If a solution is found with an even divider, prefer
1717 		 * this one.
1718 		 */
1719 		if (d == 0 && ctx.p)
1720 			break;
1721 	}
1722 
1723 	if (!ctx.p)
1724 		return -EINVAL;
1725 
1726 	/*
1727 	 * gcc incorrectly analyses that these can be used without being
1728 	 * initialized. To be fair, it's hard to guess.
1729 	 */
1730 	p0 = p1 = p2 = 0;
1731 	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
1732 	skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
1733 				  ctx.central_freq, p0, p1, p2);
1734 
1735 	return 0;
1736 }
1737 
1738 static int skl_ddi_wrpll_get_freq(struct intel_display *display,
1739 				  const struct intel_dpll *pll,
1740 				  const struct intel_dpll_hw_state *dpll_hw_state)
1741 {
1742 	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1743 	int ref_clock = display->dpll.ref_clks.nssc;
1744 	u32 p0, p1, p2, dco_freq;
1745 
1746 	p0 = hw_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
1747 	p2 = hw_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;
1748 
1749 	if (hw_state->cfgcr2 &  DPLL_CFGCR2_QDIV_MODE(1))
1750 		p1 = (hw_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
1751 	else
1752 		p1 = 1;
1753 
1754 
1755 	switch (p0) {
1756 	case DPLL_CFGCR2_PDIV_1:
1757 		p0 = 1;
1758 		break;
1759 	case DPLL_CFGCR2_PDIV_2:
1760 		p0 = 2;
1761 		break;
1762 	case DPLL_CFGCR2_PDIV_3:
1763 		p0 = 3;
1764 		break;
1765 	case DPLL_CFGCR2_PDIV_7_INVALID:
1766 		/*
1767 		 * Incorrect ASUS-Z170M BIOS setting; the HW seems to ignore bit#0,
1768 		 * so handle it the same way as PDIV_7.
1769 		 */
1770 		drm_dbg_kms(display->drm, "Invalid WRPLL PDIV divider value, fixing it.\n");
1771 		fallthrough;
1772 	case DPLL_CFGCR2_PDIV_7:
1773 		p0 = 7;
1774 		break;
1775 	default:
1776 		MISSING_CASE(p0);
1777 		return 0;
1778 	}
1779 
1780 	switch (p2) {
1781 	case DPLL_CFGCR2_KDIV_5:
1782 		p2 = 5;
1783 		break;
1784 	case DPLL_CFGCR2_KDIV_2:
1785 		p2 = 2;
1786 		break;
1787 	case DPLL_CFGCR2_KDIV_3:
1788 		p2 = 3;
1789 		break;
1790 	case DPLL_CFGCR2_KDIV_1:
1791 		p2 = 1;
1792 		break;
1793 	default:
1794 		MISSING_CASE(p2);
1795 		return 0;
1796 	}
1797 
1798 	dco_freq = (hw_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
1799 		   ref_clock;
1800 
1801 	dco_freq += ((hw_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
1802 		    ref_clock / 0x8000;
1803 
1804 	if (drm_WARN_ON(display->drm, p0 == 0 || p1 == 0 || p2 == 0))
1805 		return 0;
1806 
1807 	return dco_freq / (p0 * p1 * p2 * 5);
1808 }
1809 
1810 static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
1811 {
1812 	struct intel_display *display = to_intel_display(crtc_state);
1813 	struct skl_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.skl;
1814 	struct skl_wrpll_params wrpll_params = {};
1815 	int ret;
1816 
1817 	ret = skl_ddi_calculate_wrpll(crtc_state->port_clock,
1818 				      display->dpll.ref_clks.nssc, &wrpll_params);
1819 	if (ret)
1820 		return ret;
1821 
1822 	/*
1823 	 * See comment in intel_dpll_hw_state to understand why we always use 0
1824 	 * as the DPLL id in this function.
1825 	 */
1826 	hw_state->ctrl1 =
1827 		DPLL_CTRL1_OVERRIDE(0) |
1828 		DPLL_CTRL1_HDMI_MODE(0);
1829 
1830 	hw_state->cfgcr1 =
1831 		DPLL_CFGCR1_FREQ_ENABLE |
1832 		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
1833 		wrpll_params.dco_integer;
1834 
1835 	hw_state->cfgcr2 =
1836 		DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
1837 		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
1838 		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
1839 		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
1840 		wrpll_params.central_freq;
1841 
1842 	crtc_state->port_clock = skl_ddi_wrpll_get_freq(display, NULL,
1843 							&crtc_state->dpll_hw_state);
1844 
1845 	return 0;
1846 }
1847 
1848 static int
1849 skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1850 {
1851 	struct skl_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.skl;
1852 	u32 ctrl1;
1853 
1854 	/*
1855 	 * See comment in intel_dpll_hw_state to understand why we always use 0
1856 	 * as the DPLL id in this function.
1857 	 */
1858 	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1859 	switch (crtc_state->port_clock / 2) {
1860 	case 81000:
1861 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
1862 		break;
1863 	case 135000:
1864 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
1865 		break;
1866 	case 270000:
1867 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
1868 		break;
1869 		/* eDP 1.4 rates */
1870 	case 162000:
1871 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
1872 		break;
1873 	case 108000:
1874 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
1875 		break;
1876 	case 216000:
1877 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
1878 		break;
1879 	}
1880 
1881 	hw_state->ctrl1 = ctrl1;
1882 
1883 	return 0;
1884 }
1885 
1886 static int skl_ddi_lcpll_get_freq(struct intel_display *display,
1887 				  const struct intel_dpll *pll,
1888 				  const struct intel_dpll_hw_state *dpll_hw_state)
1889 {
1890 	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1891 	int link_clock = 0;
1892 
1893 	switch ((hw_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
1894 		DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
1895 	case DPLL_CTRL1_LINK_RATE_810:
1896 		link_clock = 81000;
1897 		break;
1898 	case DPLL_CTRL1_LINK_RATE_1080:
1899 		link_clock = 108000;
1900 		break;
1901 	case DPLL_CTRL1_LINK_RATE_1350:
1902 		link_clock = 135000;
1903 		break;
1904 	case DPLL_CTRL1_LINK_RATE_1620:
1905 		link_clock = 162000;
1906 		break;
1907 	case DPLL_CTRL1_LINK_RATE_2160:
1908 		link_clock = 216000;
1909 		break;
1910 	case DPLL_CTRL1_LINK_RATE_2700:
1911 		link_clock = 270000;
1912 		break;
1913 	default:
1914 		drm_WARN(display->drm, 1, "Unsupported link rate\n");
1915 		break;
1916 	}
1917 
1918 	return link_clock * 2;
1919 }
1920 
1921 static int skl_compute_dpll(struct intel_atomic_state *state,
1922 			    struct intel_crtc *crtc,
1923 			    struct intel_encoder *encoder)
1924 {
1925 	struct intel_crtc_state *crtc_state =
1926 		intel_atomic_get_new_crtc_state(state, crtc);
1927 
1928 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1929 		return skl_ddi_hdmi_pll_dividers(crtc_state);
1930 	else if (intel_crtc_has_dp_encoder(crtc_state))
1931 		return skl_ddi_dp_set_dpll_hw_state(crtc_state);
1932 	else
1933 		return -EINVAL;
1934 }
1935 
1936 static int skl_get_dpll(struct intel_atomic_state *state,
1937 			struct intel_crtc *crtc,
1938 			struct intel_encoder *encoder)
1939 {
1940 	struct intel_crtc_state *crtc_state =
1941 		intel_atomic_get_new_crtc_state(state, crtc);
1942 	struct intel_dpll *pll;
1943 
1944 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1945 		pll = intel_find_dpll(state, crtc,
1946 				      &crtc_state->dpll_hw_state,
1947 				      BIT(DPLL_ID_SKL_DPLL0));
1948 	else
1949 		pll = intel_find_dpll(state, crtc,
1950 				      &crtc_state->dpll_hw_state,
1951 				      BIT(DPLL_ID_SKL_DPLL3) |
1952 				      BIT(DPLL_ID_SKL_DPLL2) |
1953 				      BIT(DPLL_ID_SKL_DPLL1));
1954 	if (!pll)
1955 		return -EINVAL;
1956 
1957 	intel_reference_dpll(state, crtc,
1958 			     pll, &crtc_state->dpll_hw_state);
1959 
1960 	crtc_state->intel_dpll = pll;
1961 
1962 	return 0;
1963 }
1964 
1965 static int skl_ddi_pll_get_freq(struct intel_display *display,
1966 				const struct intel_dpll *pll,
1967 				const struct intel_dpll_hw_state *dpll_hw_state)
1968 {
1969 	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1970 
1971 	/*
1972 	 * ctrl1 register is already shifted for each pll, just use 0 to get
1973 	 * the internal shift for each field
1974 	 */
1975 	if (hw_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
1976 		return skl_ddi_wrpll_get_freq(display, pll, dpll_hw_state);
1977 	else
1978 		return skl_ddi_lcpll_get_freq(display, pll, dpll_hw_state);
1979 }
1980 
1981 static void skl_update_dpll_ref_clks(struct intel_display *display)
1982 {
1983 	/* No SSC ref */
1984 	display->dpll.ref_clks.nssc = display->cdclk.hw.ref;
1985 }
1986 
1987 static void skl_dump_hw_state(struct drm_printer *p,
1988 			      const struct intel_dpll_hw_state *dpll_hw_state)
1989 {
1990 	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1991 
1992 	drm_printf(p, "dpll_hw_state: ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
1993 		   hw_state->ctrl1, hw_state->cfgcr1, hw_state->cfgcr2);
1994 }
1995 
1996 static bool skl_compare_hw_state(const struct intel_dpll_hw_state *_a,
1997 				 const struct intel_dpll_hw_state *_b)
1998 {
1999 	const struct skl_dpll_hw_state *a = &_a->skl;
2000 	const struct skl_dpll_hw_state *b = &_b->skl;
2001 
2002 	return a->ctrl1 == b->ctrl1 &&
2003 		a->cfgcr1 == b->cfgcr1 &&
2004 		a->cfgcr2 == b->cfgcr2;
2005 }
2006 
2007 static const struct intel_dpll_funcs skl_ddi_pll_funcs = {
2008 	.enable = skl_ddi_pll_enable,
2009 	.disable = skl_ddi_pll_disable,
2010 	.get_hw_state = skl_ddi_pll_get_hw_state,
2011 	.get_freq = skl_ddi_pll_get_freq,
2012 };
2013 
2014 static const struct intel_dpll_funcs skl_ddi_dpll0_funcs = {
2015 	.enable = skl_ddi_dpll0_enable,
2016 	.disable = skl_ddi_dpll0_disable,
2017 	.get_hw_state = skl_ddi_dpll0_get_hw_state,
2018 	.get_freq = skl_ddi_pll_get_freq,
2019 };
2020 
2021 static const struct dpll_info skl_plls[] = {
2022 	{ .name = "DPLL 0", .funcs = &skl_ddi_dpll0_funcs, .id = DPLL_ID_SKL_DPLL0,
2023 	  .always_on = true, },
2024 	{ .name = "DPLL 1", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL1, },
2025 	{ .name = "DPLL 2", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL2, },
2026 	{ .name = "DPLL 3", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL3, },
2027 	{}
2028 };
2029 
2030 static const struct intel_dpll_mgr skl_pll_mgr = {
2031 	.dpll_info = skl_plls,
2032 	.compute_dplls = skl_compute_dpll,
2033 	.get_dplls = skl_get_dpll,
2034 	.put_dplls = intel_put_dpll,
2035 	.update_ref_clks = skl_update_dpll_ref_clks,
2036 	.dump_hw_state = skl_dump_hw_state,
2037 	.compare_hw_state = skl_compare_hw_state,
2038 };
2039 
2040 static void bxt_ddi_pll_enable(struct intel_display *display,
2041 			       struct intel_dpll *pll,
2042 			       const struct intel_dpll_hw_state *dpll_hw_state)
2043 {
2044 	const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
2045 	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
2046 	enum dpio_phy phy = DPIO_PHY0;
2047 	enum dpio_channel ch = DPIO_CH0;
2048 	u32 temp;
2049 	int ret;
2050 
2051 	bxt_port_to_phy_channel(display, port, &phy, &ch);
2052 
2053 	/* Non-SSC reference */
2054 	intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_REF_SEL);
2055 
2056 	if (display->platform.geminilake) {
2057 		intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port),
2058 			     0, PORT_PLL_POWER_ENABLE);
2059 
2060 		ret = intel_de_wait_custom(display, BXT_PORT_PLL_ENABLE(port),
2061 					   PORT_PLL_POWER_STATE, PORT_PLL_POWER_STATE,
2062 					   200, 0, NULL);
2063 		if (ret)
2064 			drm_err(display->drm,
2065 				"Power state not set for PLL:%d\n", port);
2066 	}
2067 
2068 	/* Disable 10 bit clock */
2069 	intel_de_rmw(display, BXT_PORT_PLL_EBB_4(phy, ch),
2070 		     PORT_PLL_10BIT_CLK_ENABLE, 0);
2071 
2072 	/* Write P1 & P2 */
2073 	intel_de_rmw(display, BXT_PORT_PLL_EBB_0(phy, ch),
2074 		     PORT_PLL_P1_MASK | PORT_PLL_P2_MASK, hw_state->ebb0);
2075 
2076 	/* Write M2 integer */
2077 	intel_de_rmw(display, BXT_PORT_PLL(phy, ch, 0),
2078 		     PORT_PLL_M2_INT_MASK, hw_state->pll0);
2079 
2080 	/* Write N */
2081 	intel_de_rmw(display, BXT_PORT_PLL(phy, ch, 1),
2082 		     PORT_PLL_N_MASK, hw_state->pll1);
2083 
2084 	/* Write M2 fraction */
2085 	intel_de_rmw(display, BXT_PORT_PLL(phy, ch, 2),
2086 		     PORT_PLL_M2_FRAC_MASK, hw_state->pll2);
2087 
2088 	/* Write M2 fraction enable */
2089 	intel_de_rmw(display, BXT_PORT_PLL(phy, ch, 3),
2090 		     PORT_PLL_M2_FRAC_ENABLE, hw_state->pll3);
2091 
2092 	/* Write coeff */
2093 	temp = intel_de_read(display, BXT_PORT_PLL(phy, ch, 6));
2094 	temp &= ~PORT_PLL_PROP_COEFF_MASK;
2095 	temp &= ~PORT_PLL_INT_COEFF_MASK;
2096 	temp &= ~PORT_PLL_GAIN_CTL_MASK;
2097 	temp |= hw_state->pll6;
2098 	intel_de_write(display, BXT_PORT_PLL(phy, ch, 6), temp);
2099 
2100 	/* Write calibration val */
2101 	intel_de_rmw(display, BXT_PORT_PLL(phy, ch, 8),
2102 		     PORT_PLL_TARGET_CNT_MASK, hw_state->pll8);
2103 
2104 	intel_de_rmw(display, BXT_PORT_PLL(phy, ch, 9),
2105 		     PORT_PLL_LOCK_THRESHOLD_MASK, hw_state->pll9);
2106 
2107 	temp = intel_de_read(display, BXT_PORT_PLL(phy, ch, 10));
2108 	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
2109 	temp &= ~PORT_PLL_DCO_AMP_MASK;
2110 	temp |= hw_state->pll10;
2111 	intel_de_write(display, BXT_PORT_PLL(phy, ch, 10), temp);
2112 
2113 	/* Recalibrate with new settings */
2114 	temp = intel_de_read(display, BXT_PORT_PLL_EBB_4(phy, ch));
2115 	temp |= PORT_PLL_RECALIBRATE;
2116 	intel_de_write(display, BXT_PORT_PLL_EBB_4(phy, ch), temp);
2117 	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
2118 	temp |= hw_state->ebb4;
2119 	intel_de_write(display, BXT_PORT_PLL_EBB_4(phy, ch), temp);
2120 
2121 	/* Enable PLL */
2122 	intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_ENABLE);
2123 	intel_de_posting_read(display, BXT_PORT_PLL_ENABLE(port));
2124 
2125 	ret = intel_de_wait_custom(display, BXT_PORT_PLL_ENABLE(port),
2126 				   PORT_PLL_LOCK, PORT_PLL_LOCK,
2127 				   200, 0, NULL);
2128 	if (ret)
2129 		drm_err(display->drm, "PLL %d not locked\n", port);
2130 
2131 	if (display->platform.geminilake) {
2132 		temp = intel_de_read(display, BXT_PORT_TX_DW5_LN(phy, ch, 0));
2133 		temp |= DCC_DELAY_RANGE_2;
2134 		intel_de_write(display, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
2135 	}
2136 
2137 	/*
2138 	 * While we write to the group register to program all lanes at once we
2139 	 * While we write to the group register to program all lanes at once, we
2140 	 * can only read the lane registers; we pick lanes 0/1 for that.
2141 	temp = intel_de_read(display, BXT_PORT_PCS_DW12_LN01(phy, ch));
2142 	temp &= ~LANE_STAGGER_MASK;
2143 	temp &= ~LANESTAGGER_STRAP_OVRD;
2144 	temp |= hw_state->pcsdw12;
2145 	intel_de_write(display, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
2146 }
2147 
2148 static void bxt_ddi_pll_disable(struct intel_display *display,
2149 				struct intel_dpll *pll)
2150 {
2151 	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
2152 	int ret;
2153 
2154 	intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port), PORT_PLL_ENABLE, 0);
2155 	intel_de_posting_read(display, BXT_PORT_PLL_ENABLE(port));
2156 
2157 	if (display->platform.geminilake) {
2158 		intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port),
2159 			     PORT_PLL_POWER_ENABLE, 0);
2160 
2161 		ret = intel_de_wait_custom(display, BXT_PORT_PLL_ENABLE(port),
2162 					   PORT_PLL_POWER_STATE, 0,
2163 					   200, 0, NULL);
2164 		if (ret)
2165 			drm_err(display->drm,
2166 				"Power state not reset for PLL:%d\n", port);
2167 	}
2168 }
2169 
2170 static bool bxt_ddi_pll_get_hw_state(struct intel_display *display,
2171 				     struct intel_dpll *pll,
2172 				     struct intel_dpll_hw_state *dpll_hw_state)
2173 {
2174 	struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
2175 	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
2176 	intel_wakeref_t wakeref;
2177 	enum dpio_phy phy;
2178 	enum dpio_channel ch;
2179 	u32 val;
2180 	bool ret;
2181 
2182 	bxt_port_to_phy_channel(display, port, &phy, &ch);
2183 
2184 	wakeref = intel_display_power_get_if_enabled(display,
2185 						     POWER_DOMAIN_DISPLAY_CORE);
2186 	if (!wakeref)
2187 		return false;
2188 
2189 	ret = false;
2190 
2191 	val = intel_de_read(display, BXT_PORT_PLL_ENABLE(port));
2192 	if (!(val & PORT_PLL_ENABLE))
2193 		goto out;
2194 
2195 	hw_state->ebb0 = intel_de_read(display, BXT_PORT_PLL_EBB_0(phy, ch));
2196 	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
2197 
2198 	hw_state->ebb4 = intel_de_read(display, BXT_PORT_PLL_EBB_4(phy, ch));
2199 	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
2200 
2201 	hw_state->pll0 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 0));
2202 	hw_state->pll0 &= PORT_PLL_M2_INT_MASK;
2203 
2204 	hw_state->pll1 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 1));
2205 	hw_state->pll1 &= PORT_PLL_N_MASK;
2206 
2207 	hw_state->pll2 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 2));
2208 	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
2209 
2210 	hw_state->pll3 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 3));
2211 	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
2212 
2213 	hw_state->pll6 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 6));
2214 	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
2215 			  PORT_PLL_INT_COEFF_MASK |
2216 			  PORT_PLL_GAIN_CTL_MASK;
2217 
2218 	hw_state->pll8 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 8));
2219 	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
2220 
2221 	hw_state->pll9 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 9));
2222 	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
2223 
2224 	hw_state->pll10 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 10));
2225 	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
2226 			   PORT_PLL_DCO_AMP_MASK;
2227 
2228 	/*
2229 	 * While we write to the group register to program all lanes at once, we
2230 	 * can only read the lane registers. We configure all lanes the same way,
2231 	 * so just read out lanes 0/1 here and print a note if lanes 2/3 differ.
2232 	 */
2233 	hw_state->pcsdw12 = intel_de_read(display,
2234 					  BXT_PORT_PCS_DW12_LN01(phy, ch));
2235 	if (intel_de_read(display, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
2236 		drm_dbg(display->drm,
2237 			"lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
2238 			hw_state->pcsdw12,
2239 			intel_de_read(display,
2240 				      BXT_PORT_PCS_DW12_LN23(phy, ch)));
2241 	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
2242 
2243 	ret = true;
2244 
2245 out:
2246 	intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);
2247 
2248 	return ret;
2249 }
2250 
2251 /* pre-calculated values for DP linkrates */
2252 static const struct dpll bxt_dp_clk_val[] = {
2253 	/* m2 is .22 binary fixed point */
2254 	{ .dot = 162000, .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2255 	{ .dot = 270000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
2256 	{ .dot = 540000, .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
2257 	{ .dot = 216000, .p1 = 3, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2258 	{ .dot = 243000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6133333 /* 24.3 */ },
2259 	{ .dot = 324000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2260 	{ .dot = 432000, .p1 = 3, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2261 };
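
/*
 * Purely illustrative: m2 being .22 binary fixed point means the logical
 * multiplier is m2 / 2^22, e.g. 0x6c00000 / 4194304 = 27.0 and
 * 0x819999a / 4194304 ~= 32.4.  The dot clock then works out as
 * ref * m1 * (m2 / 2^22) / (n * p1 * p2 * 5); for the 270000 entry with the
 * 100 MHz BXT reference: 100000 * 2 * 27.0 / (1 * 4 * 1 * 5) = 270000 kHz.
 */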
2262 
2263 static int
2264 bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
2265 			  struct dpll *clk_div)
2266 {
2267 	struct intel_display *display = to_intel_display(crtc_state);
2268 
2269 	/* Calculate HDMI div */
2270 	/*
2271 	 * FIXME: tie the following calculation into
2272 	 * i9xx_crtc_compute_clock
2273 	 */
2274 	if (!bxt_find_best_dpll(crtc_state, clk_div))
2275 		return -EINVAL;
2276 
2277 	drm_WARN_ON(display->drm, clk_div->m1 != 2);
2278 
2279 	return 0;
2280 }
2281 
2282 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
2283 				    struct dpll *clk_div)
2284 {
2285 	struct intel_display *display = to_intel_display(crtc_state);
2286 	int i;
2287 
2288 	*clk_div = bxt_dp_clk_val[0];
2289 	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
2290 		if (crtc_state->port_clock == bxt_dp_clk_val[i].dot) {
2291 			*clk_div = bxt_dp_clk_val[i];
2292 			break;
2293 		}
2294 	}
2295 
2296 	chv_calc_dpll_params(display->dpll.ref_clks.nssc, clk_div);
2297 
2298 	drm_WARN_ON(display->drm, clk_div->vco == 0 ||
2299 		    clk_div->dot != crtc_state->port_clock);
2300 }
2301 
2302 static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
2303 				     const struct dpll *clk_div)
2304 {
2305 	struct intel_display *display = to_intel_display(crtc_state);
2306 	struct bxt_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.bxt;
2307 	int clock = crtc_state->port_clock;
2308 	int vco = clk_div->vco;
2309 	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
2310 	u32 lanestagger;
2311 
2312 	if (vco >= 6200000 && vco <= 6700000) {
2313 		prop_coef = 4;
2314 		int_coef = 9;
2315 		gain_ctl = 3;
2316 		targ_cnt = 8;
2317 	} else if ((vco > 5400000 && vco < 6200000) ||
2318 			(vco >= 4800000 && vco < 5400000)) {
2319 		prop_coef = 5;
2320 		int_coef = 11;
2321 		gain_ctl = 3;
2322 		targ_cnt = 9;
2323 	} else if (vco == 5400000) {
2324 		prop_coef = 3;
2325 		int_coef = 8;
2326 		gain_ctl = 1;
2327 		targ_cnt = 9;
2328 	} else {
2329 		drm_err(display->drm, "Invalid VCO\n");
2330 		return -EINVAL;
2331 	}
2332 
2333 	if (clock > 270000)
2334 		lanestagger = 0x18;
2335 	else if (clock > 135000)
2336 		lanestagger = 0x0d;
2337 	else if (clock > 67000)
2338 		lanestagger = 0x07;
2339 	else if (clock > 33000)
2340 		lanestagger = 0x04;
2341 	else
2342 		lanestagger = 0x02;
2343 
2344 	hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
2345 	hw_state->pll0 = PORT_PLL_M2_INT(clk_div->m2 >> 22);
2346 	hw_state->pll1 = PORT_PLL_N(clk_div->n);
2347 	hw_state->pll2 = PORT_PLL_M2_FRAC(clk_div->m2 & 0x3fffff);
2348 
2349 	if (clk_div->m2 & 0x3fffff)
2350 		hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
2351 
2352 	hw_state->pll6 = PORT_PLL_PROP_COEFF(prop_coef) |
2353 		PORT_PLL_INT_COEFF(int_coef) |
2354 		PORT_PLL_GAIN_CTL(gain_ctl);
2355 
2356 	hw_state->pll8 = PORT_PLL_TARGET_CNT(targ_cnt);
2357 
2358 	hw_state->pll9 = PORT_PLL_LOCK_THRESHOLD(5);
2359 
2360 	hw_state->pll10 = PORT_PLL_DCO_AMP(15) |
2361 		PORT_PLL_DCO_AMP_OVR_EN_H;
2362 
2363 	hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
2364 
2365 	hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
2366 
2367 	return 0;
2368 }
2369 
2370 static int bxt_ddi_pll_get_freq(struct intel_display *display,
2371 				const struct intel_dpll *pll,
2372 				const struct intel_dpll_hw_state *dpll_hw_state)
2373 {
2374 	const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
2375 	struct dpll clock;
2376 
2377 	clock.m1 = 2;
2378 	clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, hw_state->pll0) << 22;
2379 	if (hw_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
2380 		clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK,
2381 					  hw_state->pll2);
2382 	clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, hw_state->pll1);
2383 	clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, hw_state->ebb0);
2384 	clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, hw_state->ebb0);
2385 
2386 	return chv_calc_dpll_params(display->dpll.ref_clks.nssc, &clock);
2387 }
2388 
2389 static int
2390 bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2391 {
2392 	struct dpll clk_div = {};
2393 
2394 	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
2395 
2396 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2397 }
2398 
2399 static int
2400 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2401 {
2402 	struct intel_display *display = to_intel_display(crtc_state);
2403 	struct dpll clk_div = {};
2404 	int ret;
2405 
2406 	bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
2407 
2408 	ret = bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2409 	if (ret)
2410 		return ret;
2411 
2412 	crtc_state->port_clock = bxt_ddi_pll_get_freq(display, NULL,
2413 						      &crtc_state->dpll_hw_state);
2414 
2415 	return 0;
2416 }
2417 
2418 static int bxt_compute_dpll(struct intel_atomic_state *state,
2419 			    struct intel_crtc *crtc,
2420 			    struct intel_encoder *encoder)
2421 {
2422 	struct intel_crtc_state *crtc_state =
2423 		intel_atomic_get_new_crtc_state(state, crtc);
2424 
2425 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
2426 		return bxt_ddi_hdmi_set_dpll_hw_state(crtc_state);
2427 	else if (intel_crtc_has_dp_encoder(crtc_state))
2428 		return bxt_ddi_dp_set_dpll_hw_state(crtc_state);
2429 	else
2430 		return -EINVAL;
2431 }
2432 
2433 static int bxt_get_dpll(struct intel_atomic_state *state,
2434 			struct intel_crtc *crtc,
2435 			struct intel_encoder *encoder)
2436 {
2437 	struct intel_display *display = to_intel_display(state);
2438 	struct intel_crtc_state *crtc_state =
2439 		intel_atomic_get_new_crtc_state(state, crtc);
2440 	struct intel_dpll *pll;
2441 	enum intel_dpll_id id;
2442 
2443 	/* 1:1 mapping between ports and PLLs */
2444 	id = (enum intel_dpll_id) encoder->port;
2445 	pll = intel_get_dpll_by_id(display, id);
2446 
2447 	drm_dbg_kms(display->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
2448 		    crtc->base.base.id, crtc->base.name, pll->info->name);
2449 
2450 	intel_reference_dpll(state, crtc,
2451 			     pll, &crtc_state->dpll_hw_state);
2452 
2453 	crtc_state->intel_dpll = pll;
2454 
2455 	return 0;
2456 }
2457 
2458 static void bxt_update_dpll_ref_clks(struct intel_display *display)
2459 {
2460 	display->dpll.ref_clks.ssc = 100000;
2461 	display->dpll.ref_clks.nssc = 100000;
2462 	/* DSI non-SSC ref 19.2MHz */
2463 }
2464 
2465 static void bxt_dump_hw_state(struct drm_printer *p,
2466 			      const struct intel_dpll_hw_state *dpll_hw_state)
2467 {
2468 	const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
2469 
2470 	drm_printf(p, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x, "
2471 		   "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
2472 		   "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
2473 		   hw_state->ebb0, hw_state->ebb4,
2474 		   hw_state->pll0, hw_state->pll1, hw_state->pll2, hw_state->pll3,
2475 		   hw_state->pll6, hw_state->pll8, hw_state->pll9, hw_state->pll10,
2476 		   hw_state->pcsdw12);
2477 }
2478 
2479 static bool bxt_compare_hw_state(const struct intel_dpll_hw_state *_a,
2480 				 const struct intel_dpll_hw_state *_b)
2481 {
2482 	const struct bxt_dpll_hw_state *a = &_a->bxt;
2483 	const struct bxt_dpll_hw_state *b = &_b->bxt;
2484 
2485 	return a->ebb0 == b->ebb0 &&
2486 		a->ebb4 == b->ebb4 &&
2487 		a->pll0 == b->pll0 &&
2488 		a->pll1 == b->pll1 &&
2489 		a->pll2 == b->pll2 &&
2490 		a->pll3 == b->pll3 &&
2491 		a->pll6 == b->pll6 &&
2492 		a->pll8 == b->pll8 &&
2493 		a->pll9 == b->pll9 && a->pll10 == b->pll10 &&
2494 		a->pcsdw12 == b->pcsdw12;
2495 }
2496 
2497 static const struct intel_dpll_funcs bxt_ddi_pll_funcs = {
2498 	.enable = bxt_ddi_pll_enable,
2499 	.disable = bxt_ddi_pll_disable,
2500 	.get_hw_state = bxt_ddi_pll_get_hw_state,
2501 	.get_freq = bxt_ddi_pll_get_freq,
2502 };
2503 
2504 static const struct dpll_info bxt_plls[] = {
2505 	{ .name = "PORT PLL A", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL0, },
2506 	{ .name = "PORT PLL B", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL1, },
2507 	{ .name = "PORT PLL C", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL2, },
2508 	{}
2509 };
2510 
2511 static const struct intel_dpll_mgr bxt_pll_mgr = {
2512 	.dpll_info = bxt_plls,
2513 	.compute_dplls = bxt_compute_dpll,
2514 	.get_dplls = bxt_get_dpll,
2515 	.put_dplls = intel_put_dpll,
2516 	.update_ref_clks = bxt_update_dpll_ref_clks,
2517 	.dump_hw_state = bxt_dump_hw_state,
2518 	.compare_hw_state = bxt_compare_hw_state,
2519 };
2520 
2521 static void icl_wrpll_get_multipliers(int bestdiv, int *pdiv,
2522 				      int *qdiv, int *kdiv)
2523 {
2524 	/* even dividers */
2525 	if (bestdiv % 2 == 0) {
2526 		if (bestdiv == 2) {
2527 			*pdiv = 2;
2528 			*qdiv = 1;
2529 			*kdiv = 1;
2530 		} else if (bestdiv % 4 == 0) {
2531 			*pdiv = 2;
2532 			*qdiv = bestdiv / 4;
2533 			*kdiv = 2;
2534 		} else if (bestdiv % 6 == 0) {
2535 			*pdiv = 3;
2536 			*qdiv = bestdiv / 6;
2537 			*kdiv = 2;
2538 		} else if (bestdiv % 5 == 0) {
2539 			*pdiv = 5;
2540 			*qdiv = bestdiv / 10;
2541 			*kdiv = 2;
2542 		} else if (bestdiv % 14 == 0) {
2543 			*pdiv = 7;
2544 			*qdiv = bestdiv / 14;
2545 			*kdiv = 2;
2546 		}
2547 	} else {
2548 		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
2549 			*pdiv = bestdiv;
2550 			*qdiv = 1;
2551 			*kdiv = 1;
2552 		} else { /* 9, 15, 21 */
2553 			*pdiv = bestdiv / 3;
2554 			*qdiv = 1;
2555 			*kdiv = 3;
2556 		}
2557 	}
2558 }
2559 
2560 static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
2561 				      u32 dco_freq, u32 ref_freq,
2562 				      int pdiv, int qdiv, int kdiv)
2563 {
2564 	u32 dco;
2565 
2566 	switch (kdiv) {
2567 	case 1:
2568 		params->kdiv = 1;
2569 		break;
2570 	case 2:
2571 		params->kdiv = 2;
2572 		break;
2573 	case 3:
2574 		params->kdiv = 4;
2575 		break;
2576 	default:
2577 		WARN(1, "Incorrect KDiv\n");
2578 	}
2579 
2580 	switch (pdiv) {
2581 	case 2:
2582 		params->pdiv = 1;
2583 		break;
2584 	case 3:
2585 		params->pdiv = 2;
2586 		break;
2587 	case 5:
2588 		params->pdiv = 4;
2589 		break;
2590 	case 7:
2591 		params->pdiv = 8;
2592 		break;
2593 	default:
2594 		WARN(1, "Incorrect PDiv\n");
2595 	}
2596 
2597 	WARN_ON(kdiv != 2 && qdiv != 1);
2598 
2599 	params->qdiv_ratio = qdiv;
2600 	params->qdiv_mode = (qdiv == 1) ? 0 : 1;
2601 
2602 	dco = div_u64((u64)dco_freq << 15, ref_freq);
2603 
2604 	params->dco_integer = dco >> 15;
2605 	params->dco_fraction = dco & 0x7fff;
2606 }
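
/*
 * Illustrative example: the DCO is encoded as a 15 bit binary fraction of
 * the reference.  E.g. for dco_freq = 8100000 kHz and ref_freq = 24000 kHz:
 * 8100000 / 24000 = 337.5, giving dco_integer = 337 (0x151) and
 * dco_fraction = 0.5 * 2^15 = 0x4000, which matches the 24 MHz combo PLL
 * table entries below.
 */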
2607 
2608 /*
2609  * Display WA #22010492432: ehl, tgl, adl-s, adl-p
2610  * Program half of the nominal DCO divider fraction value.
2611  */
2612 static bool
2613 ehl_combo_pll_div_frac_wa_needed(struct intel_display *display)
2614 {
2615 	return ((display->platform.elkhartlake &&
2616 		 IS_DISPLAY_STEP(display, STEP_B0, STEP_FOREVER)) ||
2617 		DISPLAY_VER(display) >= 12) &&
2618 		display->dpll.ref_clks.nssc == 38400;
2619 }
2620 
2621 struct icl_combo_pll_params {
2622 	int clock;
2623 	struct skl_wrpll_params wrpll;
2624 };
2625 
2626 /*
2627  * These values are already adjusted: they're the bits we write to the
2628  * registers, not the logical values.
2629  */
2630 static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
2631 	{ 540000,
2632 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
2633 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2634 	{ 270000,
2635 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
2636 	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2637 	{ 162000,
2638 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
2639 	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2640 	{ 324000,
2641 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
2642 	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2643 	{ 216000,
2644 	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2645 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2646 	{ 432000,
2647 	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2648 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2649 	{ 648000,
2650 	  { .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
2651 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2652 	{ 810000,
2653 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
2654 	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2655 };
2656 
2658 /* Also used for 38.4 MHz values. */
2659 static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
2660 	{ 540000,
2661 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
2662 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2663 	{ 270000,
2664 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
2665 	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2666 	{ 162000,
2667 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
2668 	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2669 	{ 324000,
2670 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
2671 	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2672 	{ 216000,
2673 	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2674 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2675 	{ 432000,
2676 	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2677 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2678 	{ 648000,
2679 	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
2680 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2681 	{ 810000,
2682 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
2683 	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2684 };
2685 
2686 static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
2687 	.dco_integer = 0x151, .dco_fraction = 0x4000,
2688 	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2689 };
2690 
2691 static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
2692 	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
2693 	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2694 };
2695 
2696 static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
2697 	.dco_integer = 0x54, .dco_fraction = 0x3000,
2698 	/* the following params are unused */
2699 	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
2700 };
2701 
2702 static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
2703 	.dco_integer = 0x43, .dco_fraction = 0x4000,
2704 	/* the following params are unused */
2705 };
2706 
2707 static int icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2708 				 struct skl_wrpll_params *pll_params)
2709 {
2710 	struct intel_display *display = to_intel_display(crtc_state);
2711 	const struct icl_combo_pll_params *params =
2712 		display->dpll.ref_clks.nssc == 24000 ?
2713 		icl_dp_combo_pll_24MHz_values :
2714 		icl_dp_combo_pll_19_2MHz_values;
2715 	int clock = crtc_state->port_clock;
2716 	int i;
2717 
2718 	for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
2719 		if (clock == params[i].clock) {
2720 			*pll_params = params[i].wrpll;
2721 			return 0;
2722 		}
2723 	}
2724 
2725 	MISSING_CASE(clock);
2726 	return -EINVAL;
2727 }
2728 
2729 static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
2730 			    struct skl_wrpll_params *pll_params)
2731 {
2732 	struct intel_display *display = to_intel_display(crtc_state);
2733 
2734 	if (DISPLAY_VER(display) >= 12) {
2735 		switch (display->dpll.ref_clks.nssc) {
2736 		default:
2737 			MISSING_CASE(display->dpll.ref_clks.nssc);
2738 			fallthrough;
2739 		case 19200:
2740 		case 38400:
2741 			*pll_params = tgl_tbt_pll_19_2MHz_values;
2742 			break;
2743 		case 24000:
2744 			*pll_params = tgl_tbt_pll_24MHz_values;
2745 			break;
2746 		}
2747 	} else {
2748 		switch (display->dpll.ref_clks.nssc) {
2749 		default:
2750 			MISSING_CASE(display->dpll.ref_clks.nssc);
2751 			fallthrough;
2752 		case 19200:
2753 		case 38400:
2754 			*pll_params = icl_tbt_pll_19_2MHz_values;
2755 			break;
2756 		case 24000:
2757 			*pll_params = icl_tbt_pll_24MHz_values;
2758 			break;
2759 		}
2760 	}
2761 
2762 	return 0;
2763 }
2764 
2765 static int icl_ddi_tbt_pll_get_freq(struct intel_display *display,
2766 				    const struct intel_dpll *pll,
2767 				    const struct intel_dpll_hw_state *dpll_hw_state)
2768 {
2769 	/*
2770 	 * The PLL outputs multiple frequencies at the same time, selection is
2771 	 * made at DDI clock mux level.
2772 	 */
2773 	drm_WARN_ON(display->drm, 1);
2774 
2775 	return 0;
2776 }
2777 
2778 static int icl_wrpll_ref_clock(struct intel_display *display)
2779 {
2780 	int ref_clock = display->dpll.ref_clks.nssc;
2781 
2782 	/*
2783 	 * For ICL+, the spec states: if reference frequency is 38.4,
2784 	 * use 19.2 because the DPLL automatically divides that by 2.
2785 	 */
2786 	if (ref_clock == 38400)
2787 		ref_clock = 19200;
2788 
2789 	return ref_clock;
2790 }
2791 
2792 static int
2793 icl_calc_wrpll(struct intel_crtc_state *crtc_state,
2794 	       struct skl_wrpll_params *wrpll_params)
2795 {
2796 	struct intel_display *display = to_intel_display(crtc_state);
2797 	int ref_clock = icl_wrpll_ref_clock(display);
2798 	u32 afe_clock = crtc_state->port_clock * 5;
2799 	u32 dco_min = 7998000;
2800 	u32 dco_max = 10000000;
2801 	u32 dco_mid = (dco_min + dco_max) / 2;
2802 	static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
2803 					 18, 20, 24, 28, 30, 32,  36,  40,
2804 					 42, 44, 48, 50, 52, 54,  56,  60,
2805 					 64, 66, 68, 70, 72, 76,  78,  80,
2806 					 84, 88, 90, 92, 96, 98, 100, 102,
2807 					  3,  5,  7,  9, 15, 21 };
2808 	u32 dco, best_dco = 0, dco_centrality = 0;
2809 	u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2810 	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2811 
2812 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2813 		dco = afe_clock * dividers[d];
2814 
2815 		if (dco <= dco_max && dco >= dco_min) {
2816 			dco_centrality = abs(dco - dco_mid);
2817 
2818 			if (dco_centrality < best_dco_centrality) {
2819 				best_dco_centrality = dco_centrality;
2820 				best_div = dividers[d];
2821 				best_dco = dco;
2822 			}
2823 		}
2824 	}
2825 
2826 	if (best_div == 0)
2827 		return -EINVAL;
2828 
2829 	icl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2830 	icl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
2831 				  pdiv, qdiv, kdiv);
2832 
2833 	return 0;
2834 }
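
/*
 * A purely illustrative walk through the search above: for a hypothetical
 * HDMI port_clock of 594000 kHz, afe_clock = 2970000 and the only listed
 * divider that lands the DCO inside [7998000, 10000000] is 3
 * (2970000 * 3 = 8910000).  3 is odd, so pdiv = 3, qdiv = 1, kdiv = 1,
 * and the DCO centrality is |8910000 - 8999000| = 89000.
 */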
2835 
2836 static int icl_ddi_combo_pll_get_freq(struct intel_display *display,
2837 				      const struct intel_dpll *pll,
2838 				      const struct intel_dpll_hw_state *dpll_hw_state)
2839 {
2840 	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
2841 	int ref_clock = icl_wrpll_ref_clock(display);
2842 	u32 dco_fraction;
2843 	u32 p0, p1, p2, dco_freq;
2844 
2845 	p0 = hw_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
2846 	p2 = hw_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
2847 
2848 	if (hw_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
2849 		p1 = (hw_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
2850 			DPLL_CFGCR1_QDIV_RATIO_SHIFT;
2851 	else
2852 		p1 = 1;
2853 
2854 	switch (p0) {
2855 	case DPLL_CFGCR1_PDIV_2:
2856 		p0 = 2;
2857 		break;
2858 	case DPLL_CFGCR1_PDIV_3:
2859 		p0 = 3;
2860 		break;
2861 	case DPLL_CFGCR1_PDIV_5:
2862 		p0 = 5;
2863 		break;
2864 	case DPLL_CFGCR1_PDIV_7:
2865 		p0 = 7;
2866 		break;
2867 	}
2868 
2869 	switch (p2) {
2870 	case DPLL_CFGCR1_KDIV_1:
2871 		p2 = 1;
2872 		break;
2873 	case DPLL_CFGCR1_KDIV_2:
2874 		p2 = 2;
2875 		break;
2876 	case DPLL_CFGCR1_KDIV_3:
2877 		p2 = 3;
2878 		break;
2879 	}
2880 
2881 	dco_freq = (hw_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
2882 		   ref_clock;
2883 
2884 	dco_fraction = (hw_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
2885 		       DPLL_CFGCR0_DCO_FRACTION_SHIFT;
2886 
2887 	if (ehl_combo_pll_div_frac_wa_needed(display))
2888 		dco_fraction *= 2;
2889 
2890 	dco_freq += (dco_fraction * ref_clock) / 0x8000;
2891 
2892 	if (drm_WARN_ON(display->drm, p0 == 0 || p1 == 0 || p2 == 0))
2893 		return 0;
2894 
2895 	return dco_freq / (p0 * p1 * p2 * 5);
2896 }
2897 
2898 static void icl_calc_dpll_state(struct intel_display *display,
2899 				const struct skl_wrpll_params *pll_params,
2900 				struct intel_dpll_hw_state *dpll_hw_state)
2901 {
2902 	struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
2903 	u32 dco_fraction = pll_params->dco_fraction;
2904 
2905 	if (ehl_combo_pll_div_frac_wa_needed(display))
2906 		dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);
2907 
2908 	hw_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
2909 			    pll_params->dco_integer;
2910 
2911 	hw_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
2912 			    DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
2913 			    DPLL_CFGCR1_KDIV(pll_params->kdiv) |
2914 			    DPLL_CFGCR1_PDIV(pll_params->pdiv);
2915 
2916 	if (DISPLAY_VER(display) >= 12)
2917 		hw_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
2918 	else
2919 		hw_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
2920 
2921 	if (display->vbt.override_afc_startup)
2922 		hw_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(display->vbt.override_afc_startup_val);
2923 }
2924 
2925 static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
2926 				    u32 *target_dco_khz,
2927 				    struct icl_dpll_hw_state *hw_state,
2928 				    bool is_dkl)
2929 {
2930 	static const u8 div1_vals[] = { 7, 5, 3, 2 };
2931 	u32 dco_min_freq, dco_max_freq;
2932 	unsigned int i;
2933 	int div2;
2934 
2935 	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
2936 	dco_max_freq = is_dp ? 8100000 : 10000000;
2937 
2938 	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
2939 		int div1 = div1_vals[i];
2940 
2941 		for (div2 = 10; div2 > 0; div2--) {
2942 			int dco = div1 * div2 * clock_khz * 5;
2943 			int a_divratio, tlinedrv, inputsel;
2944 			u32 hsdiv;
2945 
2946 			if (dco < dco_min_freq || dco > dco_max_freq)
2947 				continue;
2948 
2949 			if (div2 >= 2) {
2950 				/*
2951 				 * Note: a_divratio doesn't match the TGL BSpec
2952 				 * algorithm, but matches the hardcoded values
2953 				 * and works on HW, at least for DP alt-mode.
2954 				 */
2955 				a_divratio = is_dp ? 10 : 5;
2956 				tlinedrv = is_dkl ? 1 : 2;
2957 			} else {
2958 				a_divratio = 5;
2959 				tlinedrv = 0;
2960 			}
2961 			inputsel = is_dp ? 0 : 1;
2962 
2963 			switch (div1) {
2964 			default:
2965 				MISSING_CASE(div1);
2966 				fallthrough;
2967 			case 2:
2968 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
2969 				break;
2970 			case 3:
2971 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
2972 				break;
2973 			case 5:
2974 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
2975 				break;
2976 			case 7:
2977 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
2978 				break;
2979 			}
2980 
2981 			*target_dco_khz = dco;
2982 
2983 			hw_state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
2984 
2985 			hw_state->mg_clktop2_coreclkctl1 =
2986 				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
2987 
2988 			hw_state->mg_clktop2_hsclkctl =
2989 				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
2990 				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
2991 				hsdiv |
2992 				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
2993 
2994 			return 0;
2995 		}
2996 	}
2997 
2998 	return -EINVAL;
2999 }
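
/*
 * Illustrative example: for DP the DCO window above collapses to exactly
 * 8100000 kHz, so e.g. clock_khz = 270000 requires
 * div1 * div2 = 8100000 / (270000 * 5) = 6.  Scanning div1 in { 7, 5, 3, 2 }
 * order, the first hit is div1 = 3, div2 = 2, i.e. HSDIV_RATIO_3 with a
 * DSDIV ratio of 2, and a_divratio = 10 since div2 >= 2.
 */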
3000 
3001 /*
3002  * The specification for this function uses real numbers, so the math had to be
3003  * adapted to an integer-only calculation; that's why it looks so different.
3004  */
3005 static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
3006 				 struct intel_dpll_hw_state *dpll_hw_state)
3007 {
3008 	struct intel_display *display = to_intel_display(crtc_state);
3009 	struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3010 	int refclk_khz = display->dpll.ref_clks.nssc;
3011 	int clock = crtc_state->port_clock;
3012 	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
3013 	u32 iref_ndiv, iref_trim, iref_pulse_w;
3014 	u32 prop_coeff, int_coeff;
3015 	u32 tdc_targetcnt, feedfwgain;
3016 	u64 ssc_stepsize, ssc_steplen, ssc_steplog;
3017 	u64 tmp;
3018 	bool use_ssc = false;
3019 	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
3020 	bool is_dkl = DISPLAY_VER(display) >= 12;
3021 	int ret;
3022 
3023 	ret = icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
3024 				       hw_state, is_dkl);
3025 	if (ret)
3026 		return ret;
3027 
3028 	m1div = 2;
3029 	m2div_int = dco_khz / (refclk_khz * m1div);
3030 	if (m2div_int > 255) {
3031 		if (!is_dkl) {
3032 			m1div = 4;
3033 			m2div_int = dco_khz / (refclk_khz * m1div);
3034 		}
3035 
3036 		if (m2div_int > 255)
3037 			return -EINVAL;
3038 	}
3039 	m2div_rem = dco_khz % (refclk_khz * m1div);
3040 
3041 	tmp = (u64)m2div_rem * (1 << 22);
3042 	do_div(tmp, refclk_khz * m1div);
3043 	m2div_frac = tmp;
3044 
3045 	switch (refclk_khz) {
3046 	case 19200:
3047 		iref_ndiv = 1;
3048 		iref_trim = 28;
3049 		iref_pulse_w = 1;
3050 		break;
3051 	case 24000:
3052 		iref_ndiv = 1;
3053 		iref_trim = 25;
3054 		iref_pulse_w = 2;
3055 		break;
3056 	case 38400:
3057 		iref_ndiv = 2;
3058 		iref_trim = 28;
3059 		iref_pulse_w = 1;
3060 		break;
3061 	default:
3062 		MISSING_CASE(refclk_khz);
3063 		return -EINVAL;
3064 	}
3065 
3066 	/*
3067 	 * tdc_res = 0.000003
3068 	 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
3069 	 *
3070 	 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
3071 	 * was supposed to be a division, but we rearranged the operations of
3072 	 * the formula to avoid early divisions so we don't multiply the
3073 	 * rounding errors.
3074 	 *
3075 	 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
3076 	 * we also rearrange to work with integers.
3077 	 *
3078 	 * The 0.5 transformed to 5 results in a multiplication by 10 and the
3079 	 * last division by 10.
3080 	 */
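	/*
	 * Worked example of the line below, with an assumed refclk_khz of
	 * 24000:
	 *   2 * 1000 * 100000 * 10 / (132 * 24000) = 631, (631 + 5) / 10 = 63,
	 * which matches int(2 / (0.000003 * 8 * 50 * 1.1) / 24 + 0.5) = 63.
	 */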
3081 	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
3082 
3083 	/*
3084 	 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
3085 	 * 32 bits. That's not a problem since we round the division down
3086 	 * anyway.
3087 	 */
3088 	feedfwgain = (use_ssc || m2div_rem > 0) ?
3089 		m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
3090 
3091 	if (dco_khz >= 9000000) {
3092 		prop_coeff = 5;
3093 		int_coeff = 10;
3094 	} else {
3095 		prop_coeff = 4;
3096 		int_coeff = 8;
3097 	}
3098 
3099 	if (use_ssc) {
3100 		tmp = mul_u32_u32(dco_khz, 47 * 32);
3101 		do_div(tmp, refclk_khz * m1div * 10000);
3102 		ssc_stepsize = tmp;
3103 
3104 		tmp = mul_u32_u32(dco_khz, 1000);
3105 		ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
3106 	} else {
3107 		ssc_stepsize = 0;
3108 		ssc_steplen = 0;
3109 	}
3110 	ssc_steplog = 4;
3111 
3112 	/* write pll_state calculations */
3113 	if (is_dkl) {
3114 		hw_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
3115 					 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
3116 					 DKL_PLL_DIV0_FBPREDIV(m1div) |
3117 					 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
3118 		if (display->vbt.override_afc_startup) {
3119 			u8 val = display->vbt.override_afc_startup_val;
3120 
3121 			hw_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val);
3122 		}
3123 
3124 		hw_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
3125 					 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
3126 
3127 		hw_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
3128 					DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
3129 					DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
3130 					(use_ssc ? DKL_PLL_SSC_EN : 0);
3131 
3132 		hw_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
3133 					  DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
3134 
3135 		hw_state->mg_pll_tdc_coldst_bias =
3136 				DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
3137 				DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
3138 
3139 	} else {
3140 		hw_state->mg_pll_div0 =
3141 			(m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
3142 			MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
3143 			MG_PLL_DIV0_FBDIV_INT(m2div_int);
3144 
3145 		hw_state->mg_pll_div1 =
3146 			MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
3147 			MG_PLL_DIV1_DITHER_DIV_2 |
3148 			MG_PLL_DIV1_NDIVRATIO(1) |
3149 			MG_PLL_DIV1_FBPREDIV(m1div);
3150 
3151 		hw_state->mg_pll_lf =
3152 			MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
3153 			MG_PLL_LF_AFCCNTSEL_512 |
3154 			MG_PLL_LF_GAINCTRL(1) |
3155 			MG_PLL_LF_INT_COEFF(int_coeff) |
3156 			MG_PLL_LF_PROP_COEFF(prop_coeff);
3157 
3158 		hw_state->mg_pll_frac_lock =
3159 			MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
3160 			MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
3161 			MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
3162 			MG_PLL_FRAC_LOCK_DCODITHEREN |
3163 			MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
3164 		if (use_ssc || m2div_rem > 0)
3165 			hw_state->mg_pll_frac_lock |=
3166 				MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
3167 
3168 		hw_state->mg_pll_ssc =
3169 			(use_ssc ? MG_PLL_SSC_EN : 0) |
3170 			MG_PLL_SSC_TYPE(2) |
3171 			MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
3172 			MG_PLL_SSC_STEPNUM(ssc_steplog) |
3173 			MG_PLL_SSC_FLLEN |
3174 			MG_PLL_SSC_STEPSIZE(ssc_stepsize);
3175 
3176 		hw_state->mg_pll_tdc_coldst_bias =
3177 			MG_PLL_TDC_COLDST_COLDSTART |
3178 			MG_PLL_TDC_COLDST_IREFINT_EN |
3179 			MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
3180 			MG_PLL_TDC_TDCOVCCORR_EN |
3181 			MG_PLL_TDC_TDCSEL(3);
3182 
3183 		hw_state->mg_pll_bias =
3184 			MG_PLL_BIAS_BIAS_GB_SEL(3) |
3185 			MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
3186 			MG_PLL_BIAS_BIAS_BONUS(10) |
3187 			MG_PLL_BIAS_BIASCAL_EN |
3188 			MG_PLL_BIAS_CTRIM(12) |
3189 			MG_PLL_BIAS_VREF_RDAC(4) |
3190 			MG_PLL_BIAS_IREFTRIM(iref_trim);
3191 
3192 		if (refclk_khz == 38400) {
3193 			hw_state->mg_pll_tdc_coldst_bias_mask =
3194 				MG_PLL_TDC_COLDST_COLDSTART;
3195 			hw_state->mg_pll_bias_mask = 0;
3196 		} else {
3197 			hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
3198 			hw_state->mg_pll_bias_mask = -1U;
3199 		}
3200 
3201 		hw_state->mg_pll_tdc_coldst_bias &=
3202 			hw_state->mg_pll_tdc_coldst_bias_mask;
3203 		hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
3204 	}
3205 
3206 	return 0;
3207 }
3208 
3209 static int icl_ddi_mg_pll_get_freq(struct intel_display *display,
3210 				   const struct intel_dpll *pll,
3211 				   const struct intel_dpll_hw_state *dpll_hw_state)
3212 {
3213 	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3214 	u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
3215 	u64 tmp;
3216 
3217 	ref_clock = display->dpll.ref_clks.nssc;
3218 
3219 	if (DISPLAY_VER(display) >= 12) {
3220 		m1 = hw_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
3221 		m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
3222 		m2_int = hw_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
3223 
3224 		if (hw_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
3225 			m2_frac = hw_state->mg_pll_bias &
3226 				  DKL_PLL_BIAS_FBDIV_FRAC_MASK;
3227 			m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
3228 		} else {
3229 			m2_frac = 0;
3230 		}
3231 	} else {
3232 		m1 = hw_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
3233 		m2_int = hw_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
3234 
3235 		if (hw_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
3236 			m2_frac = hw_state->mg_pll_div0 &
3237 				  MG_PLL_DIV0_FBDIV_FRAC_MASK;
3238 			m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
3239 		} else {
3240 			m2_frac = 0;
3241 		}
3242 	}
3243 
3244 	switch (hw_state->mg_clktop2_hsclkctl &
3245 		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
3246 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
3247 		div1 = 2;
3248 		break;
3249 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
3250 		div1 = 3;
3251 		break;
3252 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
3253 		div1 = 5;
3254 		break;
3255 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
3256 		div1 = 7;
3257 		break;
3258 	default:
3259 		MISSING_CASE(hw_state->mg_clktop2_hsclkctl);
3260 		return 0;
3261 	}
3262 
3263 	div2 = (hw_state->mg_clktop2_hsclkctl &
3264 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
3265 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;
3266 
3267 	/* div2 value of 0 is same as 1 means no div */
3268 	/* a div2 value of 0 is the same as 1, i.e. no division */
3269 		div2 = 1;
3270 
3271 	/*
3272 	 * Adjust the original formula to delay the division by 2^22 in order to
3273 	 * minimize possible rounding errors.
3274 	 */
3275 	tmp = (u64)m1 * m2_int * ref_clock +
3276 	      (((u64)m1 * m2_frac * ref_clock) >> 22);
3277 	tmp = div_u64(tmp, 5 * div1 * div2);
3278 
3279 	return tmp;
3280 }
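
/*
 * Illustrative example with assumed values: ref_clock = 38400 kHz, m1 = 2,
 * m2_int = 105, m2_frac = 1966080, div1 = 3, div2 = 2:
 *
 *   tmp = 2 * 105 * 38400 + ((2 * 1966080 * 38400) >> 22)
 *       = 8064000 + 36000 = 8100000 kHz
 *
 *   8100000 / (5 * 3 * 2) = 270000 kHz, i.e. the 2.7 GHz DP link rate.
 */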
3281 
3282 /**
3283  * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
3284  * @crtc_state: state for the CRTC to select the DPLL for
3285  * @port_dpll_id: the active @port_dpll_id to select
3286  *
3287  * Select the given @port_dpll_id instance from the DPLLs reserved for the
3288  * CRTC.
3289  */
3290 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
3291 			      enum icl_port_dpll_id port_dpll_id)
3292 {
3293 	struct icl_port_dpll *port_dpll =
3294 		&crtc_state->icl_port_dplls[port_dpll_id];
3295 
3296 	crtc_state->intel_dpll = port_dpll->pll;
3297 	crtc_state->dpll_hw_state = port_dpll->hw_state;
3298 }
3299 
3300 static void icl_update_active_dpll(struct intel_atomic_state *state,
3301 				   struct intel_crtc *crtc,
3302 				   struct intel_encoder *encoder)
3303 {
3304 	struct intel_crtc_state *crtc_state =
3305 		intel_atomic_get_new_crtc_state(state, crtc);
3306 	struct intel_digital_port *primary_port;
3307 	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3308 
3309 	primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3310 		enc_to_mst(encoder)->primary :
3311 		enc_to_dig_port(encoder);
3312 
3313 	if (primary_port &&
3314 	    (intel_tc_port_in_dp_alt_mode(primary_port) ||
3315 	     intel_tc_port_in_legacy_mode(primary_port)))
3316 		port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3317 
3318 	icl_set_active_port_dpll(crtc_state, port_dpll_id);
3319 }
3320 
3321 static int icl_compute_combo_phy_dpll(struct intel_atomic_state *state,
3322 				      struct intel_crtc *crtc)
3323 {
3324 	struct intel_display *display = to_intel_display(state);
3325 	struct intel_crtc_state *crtc_state =
3326 		intel_atomic_get_new_crtc_state(state, crtc);
3327 	struct icl_port_dpll *port_dpll =
3328 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3329 	struct skl_wrpll_params pll_params = {};
3330 	int ret;
3331 
3332 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
3333 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
3334 		ret = icl_calc_wrpll(crtc_state, &pll_params);
3335 	else
3336 		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
3337 
3338 	if (ret)
3339 		return ret;
3340 
3341 	icl_calc_dpll_state(display, &pll_params, &port_dpll->hw_state);
3342 
3343 	/* this is mainly for the fastset check */
3344 	icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
3345 
3346 	crtc_state->port_clock = icl_ddi_combo_pll_get_freq(display, NULL,
3347 							    &port_dpll->hw_state);
3348 
3349 	return 0;
3350 }
3351 
3352 static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
3353 				  struct intel_crtc *crtc,
3354 				  struct intel_encoder *encoder)
3355 {
3356 	struct intel_display *display = to_intel_display(crtc);
3357 	struct intel_crtc_state *crtc_state =
3358 		intel_atomic_get_new_crtc_state(state, crtc);
3359 	struct icl_port_dpll *port_dpll =
3360 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3361 	enum port port = encoder->port;
3362 	unsigned long dpll_mask;
3363 
3364 	if (display->platform.alderlake_s) {
3365 		dpll_mask =
3366 			BIT(DPLL_ID_DG1_DPLL3) |
3367 			BIT(DPLL_ID_DG1_DPLL2) |
3368 			BIT(DPLL_ID_ICL_DPLL1) |
3369 			BIT(DPLL_ID_ICL_DPLL0);
3370 	} else if (display->platform.dg1) {
3371 		if (port == PORT_D || port == PORT_E) {
3372 			dpll_mask =
3373 				BIT(DPLL_ID_DG1_DPLL2) |
3374 				BIT(DPLL_ID_DG1_DPLL3);
3375 		} else {
3376 			dpll_mask =
3377 				BIT(DPLL_ID_DG1_DPLL0) |
3378 				BIT(DPLL_ID_DG1_DPLL1);
3379 		}
3380 	} else if (display->platform.rocketlake) {
3381 		dpll_mask =
3382 			BIT(DPLL_ID_EHL_DPLL4) |
3383 			BIT(DPLL_ID_ICL_DPLL1) |
3384 			BIT(DPLL_ID_ICL_DPLL0);
3385 	} else if ((display->platform.jasperlake ||
3386 		    display->platform.elkhartlake) &&
3387 		   port != PORT_A) {
3388 		dpll_mask =
3389 			BIT(DPLL_ID_EHL_DPLL4) |
3390 			BIT(DPLL_ID_ICL_DPLL1) |
3391 			BIT(DPLL_ID_ICL_DPLL0);
3392 	} else {
3393 		dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
3394 	}
3395 
3396 	/* Eliminate DPLLs from consideration if reserved by HTI */
3397 	dpll_mask &= ~intel_hti_dpll_mask(display);
3398 
3399 	port_dpll->pll = intel_find_dpll(state, crtc,
3400 					 &port_dpll->hw_state,
3401 					 dpll_mask);
3402 	if (!port_dpll->pll)
3403 		return -EINVAL;
3404 
3405 	intel_reference_dpll(state, crtc,
3406 			     port_dpll->pll, &port_dpll->hw_state);
3407 
3408 	icl_update_active_dpll(state, crtc, encoder);
3409 
3410 	return 0;
3411 }
3412 
3413 static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
3414 				    struct intel_crtc *crtc)
3415 {
3416 	struct intel_display *display = to_intel_display(state);
3417 	struct intel_crtc_state *crtc_state =
3418 		intel_atomic_get_new_crtc_state(state, crtc);
3419 	const struct intel_crtc_state *old_crtc_state =
3420 		intel_atomic_get_old_crtc_state(state, crtc);
3421 	struct icl_port_dpll *port_dpll =
3422 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3423 	struct skl_wrpll_params pll_params = {};
3424 	int ret;
3425 
3427 	ret = icl_calc_tbt_pll(crtc_state, &pll_params);
3428 	if (ret)
3429 		return ret;
3430 
3431 	icl_calc_dpll_state(display, &pll_params, &port_dpll->hw_state);
3432 
3433 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3434 	ret = icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state);
3435 	if (ret)
3436 		return ret;
3437 
3438 	/* this is mainly for the fastset check */
3439 	if (old_crtc_state->intel_dpll &&
3440 	    old_crtc_state->intel_dpll->info->id == DPLL_ID_ICL_TBTPLL)
3441 		icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
3442 	else
3443 		icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);
3444 
3445 	crtc_state->port_clock = icl_ddi_mg_pll_get_freq(display, NULL,
3446 							 &port_dpll->hw_state);
3447 
3448 	return 0;
3449 }
3450 
3451 static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
3452 				struct intel_crtc *crtc,
3453 				struct intel_encoder *encoder)
3454 {
3455 	struct intel_crtc_state *crtc_state =
3456 		intel_atomic_get_new_crtc_state(state, crtc);
3457 	struct icl_port_dpll *port_dpll =
3458 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3459 	enum intel_dpll_id dpll_id;
3460 	int ret;
3461 
3463 	port_dpll->pll = intel_find_dpll(state, crtc,
3464 					 &port_dpll->hw_state,
3465 					 BIT(DPLL_ID_ICL_TBTPLL));
3466 	if (!port_dpll->pll)
3467 		return -EINVAL;
3468 	intel_reference_dpll(state, crtc,
3469 			     port_dpll->pll, &port_dpll->hw_state);
3470 
3471 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3472 	dpll_id = icl_tc_port_to_pll_id(intel_encoder_to_tc(encoder));
3473 	port_dpll->pll = intel_find_dpll(state, crtc,
3474 					 &port_dpll->hw_state,
3475 					 BIT(dpll_id));
3476 	if (!port_dpll->pll) {
3477 		ret = -EINVAL;
3478 		goto err_unreference_tbt_pll;
3479 	}
3480 	intel_reference_dpll(state, crtc,
3481 			     port_dpll->pll, &port_dpll->hw_state);
3482 
3483 	icl_update_active_dpll(state, crtc, encoder);
3484 
3485 	return 0;
3486 
3487 err_unreference_tbt_pll:
3488 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3489 	intel_unreference_dpll(state, crtc, port_dpll->pll);
3490 
3491 	return ret;
3492 }
3493 
3494 static int icl_compute_dplls(struct intel_atomic_state *state,
3495 			     struct intel_crtc *crtc,
3496 			     struct intel_encoder *encoder)
3497 {
3498 	if (intel_encoder_is_combo(encoder))
3499 		return icl_compute_combo_phy_dpll(state, crtc);
3500 	else if (intel_encoder_is_tc(encoder))
3501 		return icl_compute_tc_phy_dplls(state, crtc);
3502 
3503 	MISSING_CASE(encoder->port);
3504 
3505 	return 0;
3506 }
3507 
3508 static int icl_get_dplls(struct intel_atomic_state *state,
3509 			 struct intel_crtc *crtc,
3510 			 struct intel_encoder *encoder)
3511 {
3512 	if (intel_encoder_is_combo(encoder))
3513 		return icl_get_combo_phy_dpll(state, crtc, encoder);
3514 	else if (intel_encoder_is_tc(encoder))
3515 		return icl_get_tc_phy_dplls(state, crtc, encoder);
3516 
3517 	MISSING_CASE(encoder->port);
3518 
3519 	return -EINVAL;
3520 }
3521 
3522 static void icl_put_dplls(struct intel_atomic_state *state,
3523 			  struct intel_crtc *crtc)
3524 {
3525 	const struct intel_crtc_state *old_crtc_state =
3526 		intel_atomic_get_old_crtc_state(state, crtc);
3527 	struct intel_crtc_state *new_crtc_state =
3528 		intel_atomic_get_new_crtc_state(state, crtc);
3529 	enum icl_port_dpll_id id;
3530 
3531 	new_crtc_state->intel_dpll = NULL;
3532 
3533 	for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3534 		const struct icl_port_dpll *old_port_dpll =
3535 			&old_crtc_state->icl_port_dplls[id];
3536 		struct icl_port_dpll *new_port_dpll =
3537 			&new_crtc_state->icl_port_dplls[id];
3538 
3539 		new_port_dpll->pll = NULL;
3540 
3541 		if (!old_port_dpll->pll)
3542 			continue;
3543 
3544 		intel_unreference_dpll(state, crtc, old_port_dpll->pll);
3545 	}
3546 }
3547 
3548 static bool mg_pll_get_hw_state(struct intel_display *display,
3549 				struct intel_dpll *pll,
3550 				struct intel_dpll_hw_state *dpll_hw_state)
3551 {
3552 	struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3553 	const enum intel_dpll_id id = pll->info->id;
3554 	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3555 	intel_wakeref_t wakeref;
3556 	bool ret = false;
3557 	u32 val;
3558 
3559 	i915_reg_t enable_reg = intel_tc_pll_enable_reg(display, pll);
3560 
3561 	wakeref = intel_display_power_get_if_enabled(display,
3562 						     POWER_DOMAIN_DISPLAY_CORE);
3563 	if (!wakeref)
3564 		return false;
3565 
3566 	val = intel_de_read(display, enable_reg);
3567 	if (!(val & PLL_ENABLE))
3568 		goto out;
3569 
3570 	hw_state->mg_refclkin_ctl = intel_de_read(display,
3571 						  MG_REFCLKIN_CTL(tc_port));
3572 	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3573 
3574 	hw_state->mg_clktop2_coreclkctl1 =
3575 		intel_de_read(display, MG_CLKTOP2_CORECLKCTL1(tc_port));
3576 	hw_state->mg_clktop2_coreclkctl1 &=
3577 		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3578 
3579 	hw_state->mg_clktop2_hsclkctl =
3580 		intel_de_read(display, MG_CLKTOP2_HSCLKCTL(tc_port));
3581 	hw_state->mg_clktop2_hsclkctl &=
3582 		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3583 		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3584 		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3585 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3586 
3587 	hw_state->mg_pll_div0 = intel_de_read(display, MG_PLL_DIV0(tc_port));
3588 	hw_state->mg_pll_div1 = intel_de_read(display, MG_PLL_DIV1(tc_port));
3589 	hw_state->mg_pll_lf = intel_de_read(display, MG_PLL_LF(tc_port));
3590 	hw_state->mg_pll_frac_lock = intel_de_read(display,
3591 						   MG_PLL_FRAC_LOCK(tc_port));
3592 	hw_state->mg_pll_ssc = intel_de_read(display, MG_PLL_SSC(tc_port));
3593 
3594 	hw_state->mg_pll_bias = intel_de_read(display, MG_PLL_BIAS(tc_port));
3595 	hw_state->mg_pll_tdc_coldst_bias =
3596 		intel_de_read(display, MG_PLL_TDC_COLDST_BIAS(tc_port));
3597 
3598 	if (display->dpll.ref_clks.nssc == 38400) {
3599 		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
3600 		hw_state->mg_pll_bias_mask = 0;
3601 	} else {
3602 		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
3603 		hw_state->mg_pll_bias_mask = -1U;
3604 	}
3605 
3606 	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
3607 	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
3608 
3609 	ret = true;
3610 out:
3611 	intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3612 	return ret;
3613 }
3614 
3615 static bool dkl_pll_get_hw_state(struct intel_display *display,
3616 				 struct intel_dpll *pll,
3617 				 struct intel_dpll_hw_state *dpll_hw_state)
3618 {
3619 	struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3620 	const enum intel_dpll_id id = pll->info->id;
3621 	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3622 	intel_wakeref_t wakeref;
3623 	bool ret = false;
3624 	u32 val;
3625 
3626 	wakeref = intel_display_power_get_if_enabled(display,
3627 						     POWER_DOMAIN_DISPLAY_CORE);
3628 	if (!wakeref)
3629 		return false;
3630 
3631 	val = intel_de_read(display, intel_tc_pll_enable_reg(display, pll));
3632 	if (!(val & PLL_ENABLE))
3633 		goto out;
3634 
3635 	/*
3636 	 * All registers read here have the same HIP_INDEX_REG even though
3637 	 * they are on different building blocks
3638 	 */
3639 	hw_state->mg_refclkin_ctl = intel_dkl_phy_read(display,
3640 						       DKL_REFCLKIN_CTL(tc_port));
3641 	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3642 
3643 	hw_state->mg_clktop2_hsclkctl =
3644 		intel_dkl_phy_read(display, DKL_CLKTOP2_HSCLKCTL(tc_port));
3645 	hw_state->mg_clktop2_hsclkctl &=
3646 		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3647 		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3648 		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3649 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3650 
3651 	hw_state->mg_clktop2_coreclkctl1 =
3652 		intel_dkl_phy_read(display, DKL_CLKTOP2_CORECLKCTL1(tc_port));
3653 	hw_state->mg_clktop2_coreclkctl1 &=
3654 		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3655 
3656 	hw_state->mg_pll_div0 = intel_dkl_phy_read(display, DKL_PLL_DIV0(tc_port));
3657 	val = DKL_PLL_DIV0_MASK;
3658 	if (display->vbt.override_afc_startup)
3659 		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
3660 	hw_state->mg_pll_div0 &= val;
3661 
3662 	hw_state->mg_pll_div1 = intel_dkl_phy_read(display, DKL_PLL_DIV1(tc_port));
3663 	hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
3664 				  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
3665 
3666 	hw_state->mg_pll_ssc = intel_dkl_phy_read(display, DKL_PLL_SSC(tc_port));
3667 	hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
3668 				 DKL_PLL_SSC_STEP_LEN_MASK |
3669 				 DKL_PLL_SSC_STEP_NUM_MASK |
3670 				 DKL_PLL_SSC_EN);
3671 
3672 	hw_state->mg_pll_bias = intel_dkl_phy_read(display, DKL_PLL_BIAS(tc_port));
3673 	hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
3674 				  DKL_PLL_BIAS_FBDIV_FRAC_MASK);
3675 
3676 	hw_state->mg_pll_tdc_coldst_bias =
3677 		intel_dkl_phy_read(display, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3678 	hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
3679 					     DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
3680 
3681 	ret = true;
3682 out:
3683 	intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3684 	return ret;
3685 }
3686 
3687 static bool icl_pll_get_hw_state(struct intel_display *display,
3688 				 struct intel_dpll *pll,
3689 				 struct intel_dpll_hw_state *dpll_hw_state,
3690 				 i915_reg_t enable_reg)
3691 {
3692 	struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3693 	const enum intel_dpll_id id = pll->info->id;
3694 	intel_wakeref_t wakeref;
3695 	bool ret = false;
3696 	u32 val;
3697 
3698 	wakeref = intel_display_power_get_if_enabled(display,
3699 						     POWER_DOMAIN_DISPLAY_CORE);
3700 	if (!wakeref)
3701 		return false;
3702 
3703 	val = intel_de_read(display, enable_reg);
3704 	if (!(val & PLL_ENABLE))
3705 		goto out;
3706 
3707 	if (display->platform.alderlake_s) {
3708 		hw_state->cfgcr0 = intel_de_read(display, ADLS_DPLL_CFGCR0(id));
3709 		hw_state->cfgcr1 = intel_de_read(display, ADLS_DPLL_CFGCR1(id));
3710 	} else if (display->platform.dg1) {
3711 		hw_state->cfgcr0 = intel_de_read(display, DG1_DPLL_CFGCR0(id));
3712 		hw_state->cfgcr1 = intel_de_read(display, DG1_DPLL_CFGCR1(id));
3713 	} else if (display->platform.rocketlake) {
3714 		hw_state->cfgcr0 = intel_de_read(display,
3715 						 RKL_DPLL_CFGCR0(id));
3716 		hw_state->cfgcr1 = intel_de_read(display,
3717 						 RKL_DPLL_CFGCR1(id));
3718 	} else if (DISPLAY_VER(display) >= 12) {
3719 		hw_state->cfgcr0 = intel_de_read(display,
3720 						 TGL_DPLL_CFGCR0(id));
3721 		hw_state->cfgcr1 = intel_de_read(display,
3722 						 TGL_DPLL_CFGCR1(id));
3723 		if (display->vbt.override_afc_startup) {
3724 			hw_state->div0 = intel_de_read(display, TGL_DPLL0_DIV0(id));
3725 			hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK;
3726 		}
3727 	} else {
3728 		if ((display->platform.jasperlake || display->platform.elkhartlake) &&
3729 		    id == DPLL_ID_EHL_DPLL4) {
3730 			hw_state->cfgcr0 = intel_de_read(display,
3731 							 ICL_DPLL_CFGCR0(4));
3732 			hw_state->cfgcr1 = intel_de_read(display,
3733 							 ICL_DPLL_CFGCR1(4));
3734 		} else {
3735 			hw_state->cfgcr0 = intel_de_read(display,
3736 							 ICL_DPLL_CFGCR0(id));
3737 			hw_state->cfgcr1 = intel_de_read(display,
3738 							 ICL_DPLL_CFGCR1(id));
3739 		}
3740 	}
3741 
3742 	ret = true;
3743 out:
3744 	intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3745 	return ret;
3746 }
3747 
3748 static bool combo_pll_get_hw_state(struct intel_display *display,
3749 				   struct intel_dpll *pll,
3750 				   struct intel_dpll_hw_state *dpll_hw_state)
3751 {
3752 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(display, pll);
3753 
3754 	return icl_pll_get_hw_state(display, pll, dpll_hw_state, enable_reg);
3755 }
3756 
3757 static bool tbt_pll_get_hw_state(struct intel_display *display,
3758 				 struct intel_dpll *pll,
3759 				 struct intel_dpll_hw_state *dpll_hw_state)
3760 {
3761 	return icl_pll_get_hw_state(display, pll, dpll_hw_state, TBT_PLL_ENABLE);
3762 }
3763 
3764 static void icl_dpll_write(struct intel_display *display,
3765 			   struct intel_dpll *pll,
3766 			   const struct icl_dpll_hw_state *hw_state)
3767 {
3768 	const enum intel_dpll_id id = pll->info->id;
3769 	i915_reg_t cfgcr0_reg, cfgcr1_reg, div0_reg = INVALID_MMIO_REG;
3770 
3771 	if (display->platform.alderlake_s) {
3772 		cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
3773 		cfgcr1_reg = ADLS_DPLL_CFGCR1(id);
3774 	} else if (display->platform.dg1) {
3775 		cfgcr0_reg = DG1_DPLL_CFGCR0(id);
3776 		cfgcr1_reg = DG1_DPLL_CFGCR1(id);
3777 	} else if (display->platform.rocketlake) {
3778 		cfgcr0_reg = RKL_DPLL_CFGCR0(id);
3779 		cfgcr1_reg = RKL_DPLL_CFGCR1(id);
3780 	} else if (DISPLAY_VER(display) >= 12) {
3781 		cfgcr0_reg = TGL_DPLL_CFGCR0(id);
3782 		cfgcr1_reg = TGL_DPLL_CFGCR1(id);
3783 		div0_reg = TGL_DPLL0_DIV0(id);
3784 	} else {
3785 		if ((display->platform.jasperlake || display->platform.elkhartlake) &&
3786 		    id == DPLL_ID_EHL_DPLL4) {
3787 			cfgcr0_reg = ICL_DPLL_CFGCR0(4);
3788 			cfgcr1_reg = ICL_DPLL_CFGCR1(4);
3789 		} else {
3790 			cfgcr0_reg = ICL_DPLL_CFGCR0(id);
3791 			cfgcr1_reg = ICL_DPLL_CFGCR1(id);
3792 		}
3793 	}
3794 
3795 	intel_de_write(display, cfgcr0_reg, hw_state->cfgcr0);
3796 	intel_de_write(display, cfgcr1_reg, hw_state->cfgcr1);
3797 	drm_WARN_ON_ONCE(display->drm, display->vbt.override_afc_startup &&
3798 			 !i915_mmio_reg_valid(div0_reg));
3799 	if (display->vbt.override_afc_startup &&
3800 	    i915_mmio_reg_valid(div0_reg))
3801 		intel_de_rmw(display, div0_reg,
3802 			     TGL_DPLL0_DIV0_AFC_STARTUP_MASK, hw_state->div0);
3803 	intel_de_posting_read(display, cfgcr1_reg);
3804 }
3805 
3806 static void icl_mg_pll_write(struct intel_display *display,
3807 			     struct intel_dpll *pll,
3808 			     const struct icl_dpll_hw_state *hw_state)
3809 {
3810 	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3811 
3812 	/*
3813 	 * Some of the following registers have reserved fields, so program
3814 	 * these with RMW based on a mask. The mask can be fixed or generated
3815 	 * during the calc/readout phase if the mask depends on some other HW
3816 	 * state like refclk, see icl_calc_mg_pll_state().
3817 	 */
3818 	intel_de_rmw(display, MG_REFCLKIN_CTL(tc_port),
3819 		     MG_REFCLKIN_CTL_OD_2_MUX_MASK, hw_state->mg_refclkin_ctl);
3820 
3821 	intel_de_rmw(display, MG_CLKTOP2_CORECLKCTL1(tc_port),
3822 		     MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK,
3823 		     hw_state->mg_clktop2_coreclkctl1);
3824 
3825 	intel_de_rmw(display, MG_CLKTOP2_HSCLKCTL(tc_port),
3826 		     MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3827 		     MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3828 		     MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3829 		     MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK,
3830 		     hw_state->mg_clktop2_hsclkctl);
3831 
3832 	intel_de_write(display, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
3833 	intel_de_write(display, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
3834 	intel_de_write(display, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
3835 	intel_de_write(display, MG_PLL_FRAC_LOCK(tc_port),
3836 		       hw_state->mg_pll_frac_lock);
3837 	intel_de_write(display, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
3838 
3839 	intel_de_rmw(display, MG_PLL_BIAS(tc_port),
3840 		     hw_state->mg_pll_bias_mask, hw_state->mg_pll_bias);
3841 
3842 	intel_de_rmw(display, MG_PLL_TDC_COLDST_BIAS(tc_port),
3843 		     hw_state->mg_pll_tdc_coldst_bias_mask,
3844 		     hw_state->mg_pll_tdc_coldst_bias);
3845 
3846 	intel_de_posting_read(display, MG_PLL_TDC_COLDST_BIAS(tc_port));
3847 }
3848 
3849 static void dkl_pll_write(struct intel_display *display,
3850 			  struct intel_dpll *pll,
3851 			  const struct icl_dpll_hw_state *hw_state)
3852 {
3853 	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3854 	u32 val;
3855 
3856 	/*
3857 	 * All registers programmed here have the same HIP_INDEX_REG even
3858 	 * though they are on different building blocks
3859 	 */
3860 	/* All the registers are RMW */
3861 	val = intel_dkl_phy_read(display, DKL_REFCLKIN_CTL(tc_port));
3862 	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3863 	val |= hw_state->mg_refclkin_ctl;
3864 	intel_dkl_phy_write(display, DKL_REFCLKIN_CTL(tc_port), val);
3865 
3866 	val = intel_dkl_phy_read(display, DKL_CLKTOP2_CORECLKCTL1(tc_port));
3867 	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3868 	val |= hw_state->mg_clktop2_coreclkctl1;
3869 	intel_dkl_phy_write(display, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);
3870 
3871 	val = intel_dkl_phy_read(display, DKL_CLKTOP2_HSCLKCTL(tc_port));
3872 	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3873 		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3874 		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3875 		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
3876 	val |= hw_state->mg_clktop2_hsclkctl;
3877 	intel_dkl_phy_write(display, DKL_CLKTOP2_HSCLKCTL(tc_port), val);
3878 
3879 	val = DKL_PLL_DIV0_MASK;
3880 	if (display->vbt.override_afc_startup)
3881 		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
3882 	intel_dkl_phy_rmw(display, DKL_PLL_DIV0(tc_port), val,
3883 			  hw_state->mg_pll_div0);
3884 
3885 	val = intel_dkl_phy_read(display, DKL_PLL_DIV1(tc_port));
3886 	val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
3887 		 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
3888 	val |= hw_state->mg_pll_div1;
3889 	intel_dkl_phy_write(display, DKL_PLL_DIV1(tc_port), val);
3890 
3891 	val = intel_dkl_phy_read(display, DKL_PLL_SSC(tc_port));
3892 	val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
3893 		 DKL_PLL_SSC_STEP_LEN_MASK |
3894 		 DKL_PLL_SSC_STEP_NUM_MASK |
3895 		 DKL_PLL_SSC_EN);
3896 	val |= hw_state->mg_pll_ssc;
3897 	intel_dkl_phy_write(display, DKL_PLL_SSC(tc_port), val);
3898 
3899 	val = intel_dkl_phy_read(display, DKL_PLL_BIAS(tc_port));
3900 	val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
3901 		 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
3902 	val |= hw_state->mg_pll_bias;
3903 	intel_dkl_phy_write(display, DKL_PLL_BIAS(tc_port), val);
3904 
3905 	val = intel_dkl_phy_read(display, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3906 	val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
3907 		 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
3908 	val |= hw_state->mg_pll_tdc_coldst_bias;
3909 	intel_dkl_phy_write(display, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);
3910 
3911 	intel_dkl_phy_posting_read(display, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3912 }
3913 
3914 static void icl_pll_power_enable(struct intel_display *display,
3915 				 struct intel_dpll *pll,
3916 				 i915_reg_t enable_reg)
3917 {
3918 	intel_de_rmw(display, enable_reg, 0, PLL_POWER_ENABLE);
3919 
3920 	/*
3921 	 * The spec says we need to "wait" but it also says it should be
3922 	 * immediate.
3923 	 */
3924 	if (intel_de_wait_for_set(display, enable_reg, PLL_POWER_STATE, 1))
3925 		drm_err(display->drm, "PLL %d Power not enabled\n",
3926 			pll->info->id);
3927 }
3928 
3929 static void icl_pll_enable(struct intel_display *display,
3930 			   struct intel_dpll *pll,
3931 			   i915_reg_t enable_reg)
3932 {
3933 	intel_de_rmw(display, enable_reg, 0, PLL_ENABLE);
3934 
3935 	/* Timeout is actually 600us. */
3936 	if (intel_de_wait_for_set(display, enable_reg, PLL_LOCK, 1))
3937 		drm_err(display->drm, "PLL %d not locked\n", pll->info->id);
3938 }
3939 
3940 static void adlp_cmtg_clock_gating_wa(struct intel_display *display, struct intel_dpll *pll)
3941 {
3942 	u32 val;
3943 
3944 	if (!(display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) ||
3945 	    pll->info->id != DPLL_ID_ICL_DPLL0)
3946 		return;
3947 	/*
3948 	 * Wa_16011069516:adl-p[a0]
3949 	 *
3950 	 * All CMTG regs are unreliable until CMTG clock gating is disabled,
3951 	 * so we can only assume the default TRANS_CMTG_CHICKEN reg value and
3952 	 * sanity check this assumption with a double read, which presumably
3953 	 * returns the correct value even with clock gating on.
3954 	 *
3955 	 * Instead of the usual place for workarounds we apply this one here,
3956 	 * since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled.
3957 	 */
3958 	val = intel_de_read(display, TRANS_CMTG_CHICKEN);
3959 	val = intel_de_rmw(display, TRANS_CMTG_CHICKEN, ~0, DISABLE_DPT_CLK_GATING);
3960 	if (drm_WARN_ON(display->drm, val & ~DISABLE_DPT_CLK_GATING))
3961 		drm_dbg_kms(display->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val);
3962 }
3963 
3964 static void combo_pll_enable(struct intel_display *display,
3965 			     struct intel_dpll *pll,
3966 			     const struct intel_dpll_hw_state *dpll_hw_state)
3967 {
3968 	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3969 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(display, pll);
3970 
3971 	icl_pll_power_enable(display, pll, enable_reg);
3972 
3973 	icl_dpll_write(display, pll, hw_state);
3974 
3975 	/*
3976 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3977 	 * paths should already be setting the appropriate voltage, hence we do
3978 	 * nothing here.
3979 	 */
3980 
3981 	icl_pll_enable(display, pll, enable_reg);
3982 
3983 	adlp_cmtg_clock_gating_wa(display, pll);
3984 
3985 	/* DVFS post sequence would be here. See the comment above. */
3986 }
3987 
3988 static void tbt_pll_enable(struct intel_display *display,
3989 			   struct intel_dpll *pll,
3990 			   const struct intel_dpll_hw_state *dpll_hw_state)
3991 {
3992 	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3993 
3994 	icl_pll_power_enable(display, pll, TBT_PLL_ENABLE);
3995 
3996 	icl_dpll_write(display, pll, hw_state);
3997 
3998 	/*
3999 	 * DVFS pre sequence would be here, but in our driver the cdclk code
4000 	 * paths should already be setting the appropriate voltage, hence we do
4001 	 * nothing here.
4002 	 */
4003 
4004 	icl_pll_enable(display, pll, TBT_PLL_ENABLE);
4005 
4006 	/* DVFS post sequence would be here. See the comment above. */
4007 }
4008 
4009 static void mg_pll_enable(struct intel_display *display,
4010 			  struct intel_dpll *pll,
4011 			  const struct intel_dpll_hw_state *dpll_hw_state)
4012 {
4013 	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
4014 	i915_reg_t enable_reg = intel_tc_pll_enable_reg(display, pll);
4015 
4016 	icl_pll_power_enable(display, pll, enable_reg);
4017 
4018 	if (DISPLAY_VER(display) >= 12)
4019 		dkl_pll_write(display, pll, hw_state);
4020 	else
4021 		icl_mg_pll_write(display, pll, hw_state);
4022 
4023 	/*
4024 	 * DVFS pre sequence would be here, but in our driver the cdclk code
4025 	 * paths should already be setting the appropriate voltage, hence we do
4026 	 * nothing here.
4027 	 */
4028 
4029 	icl_pll_enable(display, pll, enable_reg);
4030 
4031 	/* DVFS post sequence would be here. See the comment above. */
4032 }
4033 
4034 static void icl_pll_disable(struct intel_display *display,
4035 			    struct intel_dpll *pll,
4036 			    i915_reg_t enable_reg)
4037 {
4038 	/* The first steps are done by intel_ddi_post_disable(). */
4039 
4040 	/*
4041 	 * DVFS pre sequence would be here, but in our driver the cdclk code
4042 	 * paths should already be setting the appropriate voltage, hence we do
4043 	 * nothing here.
4044 	 */
4045 
4046 	intel_de_rmw(display, enable_reg, PLL_ENABLE, 0);
4047 
4048 	/* Timeout is actually 1us. */
4049 	if (intel_de_wait_for_clear(display, enable_reg, PLL_LOCK, 1))
4050 		drm_err(display->drm, "PLL %d still locked\n", pll->info->id);
4051 
4052 	/* DVFS post sequence would be here. See the comment above. */
4053 
4054 	intel_de_rmw(display, enable_reg, PLL_POWER_ENABLE, 0);
4055 
4056 	/*
4057 	 * The spec says we need to "wait" but it also says it should be
4058 	 * immediate.
4059 	 */
4060 	if (intel_de_wait_for_clear(display, enable_reg, PLL_POWER_STATE, 1))
4061 		drm_err(display->drm, "PLL %d Power not disabled\n",
4062 			pll->info->id);
4063 }
4064 
4065 static void combo_pll_disable(struct intel_display *display,
4066 			      struct intel_dpll *pll)
4067 {
4068 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(display, pll);
4069 
4070 	icl_pll_disable(display, pll, enable_reg);
4071 }
4072 
4073 static void tbt_pll_disable(struct intel_display *display,
4074 			    struct intel_dpll *pll)
4075 {
4076 	icl_pll_disable(display, pll, TBT_PLL_ENABLE);
4077 }
4078 
4079 static void mg_pll_disable(struct intel_display *display,
4080 			   struct intel_dpll *pll)
4081 {
4082 	i915_reg_t enable_reg = intel_tc_pll_enable_reg(display, pll);
4083 
4084 	icl_pll_disable(display, pll, enable_reg);
4085 }
4086 
4087 static void icl_update_dpll_ref_clks(struct intel_display *display)
4088 {
4089 	/* No SSC ref */
4090 	display->dpll.ref_clks.nssc = display->cdclk.hw.ref;
4091 }
4092 
4093 static void icl_dump_hw_state(struct drm_printer *p,
4094 			      const struct intel_dpll_hw_state *dpll_hw_state)
4095 {
4096 	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
4097 
4098 	drm_printf(p, "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, "
4099 		   "mg_refclkin_ctl: 0x%x, mg_clktop2_coreclkctl1: 0x%x, "
4100 		   "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
4101 		   "mg_pll_div1: 0x%x, mg_pll_lf: 0x%x, "
4102 		   "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
4103 		   "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
4104 		   hw_state->cfgcr0, hw_state->cfgcr1, hw_state->div0,
4105 		   hw_state->mg_refclkin_ctl,
4106 		   hw_state->mg_clktop2_coreclkctl1,
4107 		   hw_state->mg_clktop2_hsclkctl,
4108 		   hw_state->mg_pll_div0,
4109 		   hw_state->mg_pll_div1,
4110 		   hw_state->mg_pll_lf,
4111 		   hw_state->mg_pll_frac_lock,
4112 		   hw_state->mg_pll_ssc,
4113 		   hw_state->mg_pll_bias,
4114 		   hw_state->mg_pll_tdc_coldst_bias);
4115 }
4116 
4117 static bool icl_compare_hw_state(const struct intel_dpll_hw_state *_a,
4118 				 const struct intel_dpll_hw_state *_b)
4119 {
4120 	const struct icl_dpll_hw_state *a = &_a->icl;
4121 	const struct icl_dpll_hw_state *b = &_b->icl;
4122 
4123 	/* FIXME split combo vs. mg more thoroughly */
4124 	return a->cfgcr0 == b->cfgcr0 &&
4125 		a->cfgcr1 == b->cfgcr1 &&
4126 		a->div0 == b->div0 &&
4127 		a->mg_refclkin_ctl == b->mg_refclkin_ctl &&
4128 		a->mg_clktop2_coreclkctl1 == b->mg_clktop2_coreclkctl1 &&
4129 		a->mg_clktop2_hsclkctl == b->mg_clktop2_hsclkctl &&
4130 		a->mg_pll_div0 == b->mg_pll_div0 &&
4131 		a->mg_pll_div1 == b->mg_pll_div1 &&
4132 		a->mg_pll_lf == b->mg_pll_lf &&
4133 		a->mg_pll_frac_lock == b->mg_pll_frac_lock &&
4134 		a->mg_pll_ssc == b->mg_pll_ssc &&
4135 		a->mg_pll_bias == b->mg_pll_bias &&
4136 		a->mg_pll_tdc_coldst_bias == b->mg_pll_tdc_coldst_bias;
4137 }
4138 
4139 static const struct intel_dpll_funcs combo_pll_funcs = {
4140 	.enable = combo_pll_enable,
4141 	.disable = combo_pll_disable,
4142 	.get_hw_state = combo_pll_get_hw_state,
4143 	.get_freq = icl_ddi_combo_pll_get_freq,
4144 };
4145 
4146 static const struct intel_dpll_funcs tbt_pll_funcs = {
4147 	.enable = tbt_pll_enable,
4148 	.disable = tbt_pll_disable,
4149 	.get_hw_state = tbt_pll_get_hw_state,
4150 	.get_freq = icl_ddi_tbt_pll_get_freq,
4151 };
4152 
4153 static const struct intel_dpll_funcs mg_pll_funcs = {
4154 	.enable = mg_pll_enable,
4155 	.disable = mg_pll_disable,
4156 	.get_hw_state = mg_pll_get_hw_state,
4157 	.get_freq = icl_ddi_mg_pll_get_freq,
4158 };
4159 
4160 static const struct dpll_info icl_plls[] = {
4161 	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4162 	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4163 	{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
4164 	  .is_alt_port_dpll = true, },
4165 	{ .name = "MG PLL 1", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
4166 	{ .name = "MG PLL 2", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
4167 	{ .name = "MG PLL 3", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
4168 	{ .name = "MG PLL 4", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
4169 	{}
4170 };
4171 
4172 static const struct intel_dpll_mgr icl_pll_mgr = {
4173 	.dpll_info = icl_plls,
4174 	.compute_dplls = icl_compute_dplls,
4175 	.get_dplls = icl_get_dplls,
4176 	.put_dplls = icl_put_dplls,
4177 	.update_active_dpll = icl_update_active_dpll,
4178 	.update_ref_clks = icl_update_dpll_ref_clks,
4179 	.dump_hw_state = icl_dump_hw_state,
4180 	.compare_hw_state = icl_compare_hw_state,
4181 };
4182 
4183 static const struct dpll_info ehl_plls[] = {
4184 	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4185 	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4186 	{ .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4,
4187 	  .power_domain = POWER_DOMAIN_DC_OFF, },
4188 	{}
4189 };
4190 
4191 static const struct intel_dpll_mgr ehl_pll_mgr = {
4192 	.dpll_info = ehl_plls,
4193 	.compute_dplls = icl_compute_dplls,
4194 	.get_dplls = icl_get_dplls,
4195 	.put_dplls = icl_put_dplls,
4196 	.update_ref_clks = icl_update_dpll_ref_clks,
4197 	.dump_hw_state = icl_dump_hw_state,
4198 	.compare_hw_state = icl_compare_hw_state,
4199 };
4200 
4201 static const struct intel_dpll_funcs dkl_pll_funcs = {
4202 	.enable = mg_pll_enable,
4203 	.disable = mg_pll_disable,
4204 	.get_hw_state = dkl_pll_get_hw_state,
4205 	.get_freq = icl_ddi_mg_pll_get_freq,
4206 };
4207 
4208 static const struct dpll_info tgl_plls[] = {
4209 	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4210 	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4211 	{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
4212 	  .is_alt_port_dpll = true, },
4213 	{ .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
4214 	{ .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
4215 	{ .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
4216 	{ .name = "TC PLL 4", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
4217 	{ .name = "TC PLL 5", .funcs = &dkl_pll_funcs, .id = DPLL_ID_TGL_MGPLL5, },
4218 	{ .name = "TC PLL 6", .funcs = &dkl_pll_funcs, .id = DPLL_ID_TGL_MGPLL6, },
4219 	{}
4220 };
4221 
4222 static const struct intel_dpll_mgr tgl_pll_mgr = {
4223 	.dpll_info = tgl_plls,
4224 	.compute_dplls = icl_compute_dplls,
4225 	.get_dplls = icl_get_dplls,
4226 	.put_dplls = icl_put_dplls,
4227 	.update_active_dpll = icl_update_active_dpll,
4228 	.update_ref_clks = icl_update_dpll_ref_clks,
4229 	.dump_hw_state = icl_dump_hw_state,
4230 	.compare_hw_state = icl_compare_hw_state,
4231 };
4232 
4233 static const struct dpll_info rkl_plls[] = {
4234 	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4235 	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4236 	{ .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4, },
4237 	{}
4238 };
4239 
4240 static const struct intel_dpll_mgr rkl_pll_mgr = {
4241 	.dpll_info = rkl_plls,
4242 	.compute_dplls = icl_compute_dplls,
4243 	.get_dplls = icl_get_dplls,
4244 	.put_dplls = icl_put_dplls,
4245 	.update_ref_clks = icl_update_dpll_ref_clks,
4246 	.dump_hw_state = icl_dump_hw_state,
4247 	.compare_hw_state = icl_compare_hw_state,
4248 };
4249 
4250 static const struct dpll_info dg1_plls[] = {
4251 	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL0, },
4252 	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL1, },
4253 	{ .name = "DPLL 2", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL2, },
4254 	{ .name = "DPLL 3", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL3, },
4255 	{}
4256 };
4257 
4258 static const struct intel_dpll_mgr dg1_pll_mgr = {
4259 	.dpll_info = dg1_plls,
4260 	.compute_dplls = icl_compute_dplls,
4261 	.get_dplls = icl_get_dplls,
4262 	.put_dplls = icl_put_dplls,
4263 	.update_ref_clks = icl_update_dpll_ref_clks,
4264 	.dump_hw_state = icl_dump_hw_state,
4265 	.compare_hw_state = icl_compare_hw_state,
4266 };
4267 
4268 static const struct dpll_info adls_plls[] = {
4269 	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4270 	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4271 	{ .name = "DPLL 2", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL2, },
4272 	{ .name = "DPLL 3", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL3, },
4273 	{}
4274 };
4275 
4276 static const struct intel_dpll_mgr adls_pll_mgr = {
4277 	.dpll_info = adls_plls,
4278 	.compute_dplls = icl_compute_dplls,
4279 	.get_dplls = icl_get_dplls,
4280 	.put_dplls = icl_put_dplls,
4281 	.update_ref_clks = icl_update_dpll_ref_clks,
4282 	.dump_hw_state = icl_dump_hw_state,
4283 	.compare_hw_state = icl_compare_hw_state,
4284 };
4285 
4286 static const struct dpll_info adlp_plls[] = {
4287 	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4288 	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4289 	{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
4290 	  .is_alt_port_dpll = true, },
4291 	{ .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
4292 	{ .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
4293 	{ .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
4294 	{ .name = "TC PLL 4", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
4295 	{}
4296 };
4297 
4298 static const struct intel_dpll_mgr adlp_pll_mgr = {
4299 	.dpll_info = adlp_plls,
4300 	.compute_dplls = icl_compute_dplls,
4301 	.get_dplls = icl_get_dplls,
4302 	.put_dplls = icl_put_dplls,
4303 	.update_active_dpll = icl_update_active_dpll,
4304 	.update_ref_clks = icl_update_dpll_ref_clks,
4305 	.dump_hw_state = icl_dump_hw_state,
4306 	.compare_hw_state = icl_compare_hw_state,
4307 };
4308 
4309 /**
4310  * intel_dpll_init - Initialize DPLLs
4311  * @display: intel_display device
4312  *
4313  * Initialize DPLLs for @display.
4314  */
4315 void intel_dpll_init(struct intel_display *display)
4316 {
4317 	const struct intel_dpll_mgr *dpll_mgr = NULL;
4318 	const struct dpll_info *dpll_info;
4319 	int i;
4320 
4321 	mutex_init(&display->dpll.lock);
4322 
4323 	if (DISPLAY_VER(display) >= 14 || display->platform.dg2)
4324 		/* No shared DPLLs on DG2; port PLLs are part of the PHY */
4325 		dpll_mgr = NULL;
4326 	else if (display->platform.alderlake_p)
4327 		dpll_mgr = &adlp_pll_mgr;
4328 	else if (display->platform.alderlake_s)
4329 		dpll_mgr = &adls_pll_mgr;
4330 	else if (display->platform.dg1)
4331 		dpll_mgr = &dg1_pll_mgr;
4332 	else if (display->platform.rocketlake)
4333 		dpll_mgr = &rkl_pll_mgr;
4334 	else if (DISPLAY_VER(display) >= 12)
4335 		dpll_mgr = &tgl_pll_mgr;
4336 	else if (display->platform.jasperlake || display->platform.elkhartlake)
4337 		dpll_mgr = &ehl_pll_mgr;
4338 	else if (DISPLAY_VER(display) >= 11)
4339 		dpll_mgr = &icl_pll_mgr;
4340 	else if (display->platform.geminilake || display->platform.broxton)
4341 		dpll_mgr = &bxt_pll_mgr;
4342 	else if (DISPLAY_VER(display) == 9)
4343 		dpll_mgr = &skl_pll_mgr;
4344 	else if (HAS_DDI(display))
4345 		dpll_mgr = &hsw_pll_mgr;
4346 	else if (HAS_PCH_IBX(display) || HAS_PCH_CPT(display))
4347 		dpll_mgr = &pch_pll_mgr;
4348 
4349 	if (!dpll_mgr)
4350 		return;
4351 
4352 	dpll_info = dpll_mgr->dpll_info;
4353 
4354 	for (i = 0; dpll_info[i].name; i++) {
4355 		if (drm_WARN_ON(display->drm,
4356 				i >= ARRAY_SIZE(display->dpll.dplls)))
4357 			break;
4358 
4359 		/* must fit into unsigned long bitmask on 32bit */
4360 		if (drm_WARN_ON(display->drm, dpll_info[i].id >= 32))
4361 			break;
4362 
4363 		display->dpll.dplls[i].info = &dpll_info[i];
4364 		display->dpll.dplls[i].index = i;
4365 	}
4366 
4367 	display->dpll.mgr = dpll_mgr;
4368 	display->dpll.num_dpll = i;
4369 }
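
/*
 * Illustrative sketch, not part of the driver: adding PLL support for a
 * hypothetical new platform ("foo") would follow the same table-driven
 * pattern as the entries above, i.e. a dpll_info table, an intel_dpll_mgr
 * wiring up the platform hooks, and a matching branch in intel_dpll_init()
 * above:
 *
 *	static const struct dpll_info foo_plls[] = {
 *		{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
 *		{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
 *		{}
 *	};
 *
 *	static const struct intel_dpll_mgr foo_pll_mgr = {
 *		.dpll_info = foo_plls,
 *		.compute_dplls = icl_compute_dplls,
 *		.get_dplls = icl_get_dplls,
 *		.put_dplls = icl_put_dplls,
 *		.update_ref_clks = icl_update_dpll_ref_clks,
 *		.dump_hw_state = icl_dump_hw_state,
 *		.compare_hw_state = icl_compare_hw_state,
 *	};
 */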
4370 
4371 /**
4372  * intel_dpll_compute - compute DPLL state for a CRTC and encoder combination
4373  * @state: atomic state
4374  * @crtc: CRTC to compute DPLLs for
4375  * @encoder: encoder
4376  *
4377  * This function computes the DPLL state for the given CRTC and encoder.
4378  *
4379  * The new configuration in the atomic commit @state is made effective by
4380  * calling intel_dpll_swap_state().
4381  *
4382  * Returns:
4383  * 0 on success, negative error code on failure.
4384  */
4385 int intel_dpll_compute(struct intel_atomic_state *state,
4386 		       struct intel_crtc *crtc,
4387 		       struct intel_encoder *encoder)
4388 {
4389 	struct intel_display *display = to_intel_display(state);
4390 	const struct intel_dpll_mgr *dpll_mgr = display->dpll.mgr;
4391 
4392 	if (drm_WARN_ON(display->drm, !dpll_mgr))
4393 		return -EINVAL;
4394 
4395 	return dpll_mgr->compute_dplls(state, crtc, encoder);
4396 }
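
/*
 * Illustrative sketch, not part of the driver: a hypothetical atomic check
 * helper would compute the DPLL state for a CRTC/encoder pair and propagate
 * any failure, e.g.:
 *
 *	int ret;
 *
 *	ret = intel_dpll_compute(state, crtc, encoder);
 *	if (ret)
 *		return ret;
 */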
4397 
4398 /**
4399  * intel_dpll_reserve - reserve DPLLs for CRTC and encoder combination
4400  * @state: atomic state
4401  * @crtc: CRTC to reserve DPLLs for
4402  * @encoder: encoder
4403  *
4404  * This function reserves all required DPLLs for the given CRTC and encoder
4405  * combination in the current atomic commit @state and the new @crtc atomic
4406  * state.
4407  *
4408  * The new configuration in the atomic commit @state is made effective by
4409  * calling intel_dpll_swap_state().
4410  *
4411  * The reserved DPLLs should be released by calling
4412  * intel_dpll_release().
4413  *
4414  * Returns:
4415  * 0 if all required DPLLs were successfully reserved,
4416  * negative error code otherwise.
4417  */
4418 int intel_dpll_reserve(struct intel_atomic_state *state,
4419 		       struct intel_crtc *crtc,
4420 		       struct intel_encoder *encoder)
4421 {
4422 	struct intel_display *display = to_intel_display(state);
4423 	const struct intel_dpll_mgr *dpll_mgr = display->dpll.mgr;
4424 
4425 	if (drm_WARN_ON(display->drm, !dpll_mgr))
4426 		return -EINVAL;
4427 
4428 	return dpll_mgr->get_dplls(state, crtc, encoder);
4429 }
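
/*
 * Illustrative sketch, not part of the driver: a reservation is paired with a
 * later intel_dpll_release(), and the staged configuration only takes effect
 * via intel_dpll_swap_state() during commit. A hypothetical caller could look
 * like:
 *
 *	ret = intel_dpll_reserve(state, crtc, encoder);
 *	if (ret)
 *		return ret;
 *	...
 *	intel_dpll_release(state, crtc);
 */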
4430 
4431 /**
4432  * intel_dpll_release - end use of DPLLs by CRTC in atomic state
4433  * @state: atomic state
4434  * @crtc: crtc from which the DPLLs are to be released
4435  *
4436  * This function releases all DPLLs reserved by intel_dpll_reserve()
4437  * from the current atomic commit @state and the old @crtc atomic state.
4438  *
4439  * The new configuration in the atomic commit @state is made effective by
4440  * calling intel_dpll_swap_state().
4441  */
4442 void intel_dpll_release(struct intel_atomic_state *state,
4443 			struct intel_crtc *crtc)
4444 {
4445 	struct intel_display *display = to_intel_display(state);
4446 	const struct intel_dpll_mgr *dpll_mgr = display->dpll.mgr;
4447 
4448 	/*
4449 	 * FIXME: this function is called for every platform having a
4450 	 * compute_clock hook, even on platforms that don't yet support
4451 	 * the DPLL framework and on which intel_dpll_reserve() is
4452 	 * never called.
4453 	 */
4454 	if (!dpll_mgr)
4455 		return;
4456 
4457 	dpll_mgr->put_dplls(state, crtc);
4458 }
4459 
4460 /**
4461  * intel_dpll_update_active - update the active DPLL for a CRTC/encoder
4462  * @state: atomic state
4463  * @crtc: the CRTC for which to update the active DPLL
4464  * @encoder: encoder determining the type of port DPLL
4465  *
4466  * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
4467  * from the port DPLLs reserved previously by intel_dpll_reserve(). The
4468  * DPLL selected will be based on the current mode of the encoder's port.
4469  */
4470 void intel_dpll_update_active(struct intel_atomic_state *state,
4471 			      struct intel_crtc *crtc,
4472 			      struct intel_encoder *encoder)
4473 {
4474 	struct intel_display *display = to_intel_display(encoder);
4475 	const struct intel_dpll_mgr *dpll_mgr = display->dpll.mgr;
4476 
4477 	if (drm_WARN_ON(display->drm, !dpll_mgr))
4478 		return;
4479 
4480 	dpll_mgr->update_active_dpll(state, crtc, encoder);
4481 }
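
/*
 * Illustrative sketch, not part of the driver: for a TypeC port both the TBT
 * and the MG/TC PLL are reserved (see icl_get_tc_phy_dplls()), and a
 * hypothetical caller would then pick the active one based on the port's
 * current mode simply via:
 *
 *	intel_dpll_update_active(state, crtc, encoder);
 */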
4482 
4483 /**
4484  * intel_dpll_get_freq - calculate the DPLL's output frequency
4485  * @display: intel_display device
4486  * @pll: DPLL for which to calculate the output frequency
4487  * @dpll_hw_state: DPLL state from which to calculate the output frequency
4488  *
4489  * Return the output frequency corresponding to @pll's passed-in @dpll_hw_state.
4490  */
4491 int intel_dpll_get_freq(struct intel_display *display,
4492 			const struct intel_dpll *pll,
4493 			const struct intel_dpll_hw_state *dpll_hw_state)
4494 {
4495 	if (drm_WARN_ON(display->drm, !pll->info->funcs->get_freq))
4496 		return 0;
4497 
4498 	return pll->info->funcs->get_freq(display, pll, dpll_hw_state);
4499 }
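
/*
 * Illustrative sketch, not part of the driver: combined with
 * intel_dpll_get_hw_state() below, this can be used to derive a PLL's
 * current output frequency from the hardware, e.g.:
 *
 *	struct intel_dpll_hw_state hw_state = {};
 *	int port_clock = 0;
 *
 *	if (intel_dpll_get_hw_state(display, pll, &hw_state))
 *		port_clock = intel_dpll_get_freq(display, pll, &hw_state);
 */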
4500 
4501 /**
4502  * intel_dpll_get_hw_state - read out the DPLL's hardware state
4503  * @display: intel_display device instance
4504  * @pll: DPLL for which to read out the hardware state
4505  * @dpll_hw_state: DPLL's hardware state
4506  *
4507  * Read out @pll's hardware state into @dpll_hw_state.
4508  */
4509 bool intel_dpll_get_hw_state(struct intel_display *display,
4510 			     struct intel_dpll *pll,
4511 			     struct intel_dpll_hw_state *dpll_hw_state)
4512 {
4513 	return pll->info->funcs->get_hw_state(display, pll, dpll_hw_state);
4514 }
4515 
4516 static void readout_dpll_hw_state(struct intel_display *display,
4517 				  struct intel_dpll *pll)
4518 {
4519 	struct intel_crtc *crtc;
4520 
4521 	pll->on = intel_dpll_get_hw_state(display, pll, &pll->state.hw_state);
4522 
4523 	if (pll->on && pll->info->power_domain)
4524 		pll->wakeref = intel_display_power_get(display, pll->info->power_domain);
4525 
4526 	pll->state.pipe_mask = 0;
4527 	for_each_intel_crtc(display->drm, crtc) {
4528 		struct intel_crtc_state *crtc_state =
4529 			to_intel_crtc_state(crtc->base.state);
4530 
4531 		if (crtc_state->hw.active && crtc_state->intel_dpll == pll)
4532 			intel_dpll_crtc_get(crtc, pll, &pll->state);
4533 	}
4534 	pll->active_mask = pll->state.pipe_mask;
4535 
4536 	drm_dbg_kms(display->drm,
4537 		    "%s hw state readout: pipe_mask 0x%x, on %i\n",
4538 		    pll->info->name, pll->state.pipe_mask, pll->on);
4539 }
4540 
4541 void intel_dpll_update_ref_clks(struct intel_display *display)
4542 {
4543 	if (display->dpll.mgr && display->dpll.mgr->update_ref_clks)
4544 		display->dpll.mgr->update_ref_clks(display);
4545 }
4546 
4547 void intel_dpll_readout_hw_state(struct intel_display *display)
4548 {
4549 	struct intel_dpll *pll;
4550 	int i;
4551 
4552 	for_each_dpll(display, pll, i)
4553 		readout_dpll_hw_state(display, pll);
4554 }
4555 
4556 static void sanitize_dpll_state(struct intel_display *display,
4557 				struct intel_dpll *pll)
4558 {
4559 	if (!pll->on)
4560 		return;
4561 
4562 	adlp_cmtg_clock_gating_wa(display, pll);
4563 
4564 	if (pll->active_mask)
4565 		return;
4566 
4567 	drm_dbg_kms(display->drm,
4568 		    "%s enabled but not in use, disabling\n",
4569 		    pll->info->name);
4570 
4571 	_intel_disable_shared_dpll(display, pll);
4572 }
4573 
4574 void intel_dpll_sanitize_state(struct intel_display *display)
4575 {
4576 	struct intel_dpll *pll;
4577 	int i;
4578 
4579 	intel_cx0_pll_power_save_wa(display);
4580 
4581 	for_each_dpll(display, pll, i)
4582 		sanitize_dpll_state(display, pll);
4583 }
4584 
4585 /**
4586  * intel_dpll_dump_hw_state - dump hw_state
4587  * @display: intel_display structure
4588  * @p: where to print the state to
4589  * @dpll_hw_state: hw state to be dumped
4590  *
4591  * Dump out the relevant values in @dpll_hw_state.
4592  */
4593 void intel_dpll_dump_hw_state(struct intel_display *display,
4594 			      struct drm_printer *p,
4595 			      const struct intel_dpll_hw_state *dpll_hw_state)
4596 {
4597 	if (display->dpll.mgr) {
4598 		display->dpll.mgr->dump_hw_state(p, dpll_hw_state);
4599 	} else {
4600 		/* fallback for platforms that don't use the shared dpll
4601 		 * infrastructure
4602 		 */
4603 		ibx_dump_hw_state(p, dpll_hw_state);
4604 	}
4605 }
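
/*
 * Illustrative sketch, not part of the driver: any drm_printer can be used as
 * the destination, for example one backed by the kernel debug log (assuming
 * drm_dbg_printer() is available):
 *
 *	struct drm_printer p = drm_dbg_printer(display->drm, DRM_UT_KMS, "dpll");
 *
 *	intel_dpll_dump_hw_state(display, &p, &pll->state.hw_state);
 */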
4606 
4607 /**
4608  * intel_dpll_compare_hw_state - compare the two states
4609  * @display: intel_display structure
4610  * @a: first DPLL hw state
4611  * @b: second DPLL hw state
4612  *
4613  * Compare DPLL hw states @a and @b.
4614  *
4615  * Returns: true if the states are equal, false if they differ
4616  */
4617 bool intel_dpll_compare_hw_state(struct intel_display *display,
4618 				 const struct intel_dpll_hw_state *a,
4619 				 const struct intel_dpll_hw_state *b)
4620 {
4621 	if (display->dpll.mgr) {
4622 		return display->dpll.mgr->compare_hw_state(a, b);
4623 	} else {
4624 		/* fallback for platforms that don't use the shared dpll
4625 		 * infrastructure
4626 		 */
4627 		return ibx_compare_hw_state(a, b);
4628 	}
4629 }
4630 
4631 static void
4632 verify_single_dpll_state(struct intel_display *display,
4633 			 struct intel_dpll *pll,
4634 			 struct intel_crtc *crtc,
4635 			 const struct intel_crtc_state *new_crtc_state)
4636 {
4637 	struct intel_dpll_hw_state dpll_hw_state = {};
4638 	u8 pipe_mask;
4639 	bool active;
4640 
4641 	active = intel_dpll_get_hw_state(display, pll, &dpll_hw_state);
4642 
4643 	if (!pll->info->always_on) {
4644 		INTEL_DISPLAY_STATE_WARN(display, !pll->on && pll->active_mask,
4645 					 "%s: pll in active use but not on in sw tracking\n",
4646 					 pll->info->name);
4647 		INTEL_DISPLAY_STATE_WARN(display, pll->on && !pll->active_mask,
4648 					 "%s: pll is on but not used by any active pipe\n",
4649 					 pll->info->name);
4650 		INTEL_DISPLAY_STATE_WARN(display, pll->on != active,
4651 					 "%s: pll on state mismatch (expected %i, found %i)\n",
4652 					 pll->info->name, pll->on, active);
4653 	}
4654 
4655 	if (!crtc) {
4656 		INTEL_DISPLAY_STATE_WARN(display,
4657 					 pll->active_mask & ~pll->state.pipe_mask,
4658 					 "%s: more active pll users than references: 0x%x vs 0x%x\n",
4659 					 pll->info->name, pll->active_mask, pll->state.pipe_mask);
4660 
4661 		return;
4662 	}
4663 
4664 	pipe_mask = BIT(crtc->pipe);
4665 
4666 	if (new_crtc_state->hw.active)
4667 		INTEL_DISPLAY_STATE_WARN(display, !(pll->active_mask & pipe_mask),
4668 					 "%s: pll active mismatch (expected pipe %c in active mask 0x%x)\n",
4669 					 pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
4670 	else
4671 		INTEL_DISPLAY_STATE_WARN(display, pll->active_mask & pipe_mask,
4672 					 "%s: pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
4673 					 pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
4674 
4675 	INTEL_DISPLAY_STATE_WARN(display, !(pll->state.pipe_mask & pipe_mask),
4676 				 "%s: pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
4677 				 pll->info->name, pipe_mask, pll->state.pipe_mask);
4678 
4679 	INTEL_DISPLAY_STATE_WARN(display,
4680 				 pll->on && memcmp(&pll->state.hw_state, &dpll_hw_state,
4681 						   sizeof(dpll_hw_state)),
4682 				 "%s: pll hw state mismatch\n",
4683 				 pll->info->name);
4684 }
4685 
4686 static bool has_alt_port_dpll(const struct intel_dpll *old_pll,
4687 			      const struct intel_dpll *new_pll)
4688 {
4689 	return old_pll && new_pll && old_pll != new_pll &&
4690 		(old_pll->info->is_alt_port_dpll || new_pll->info->is_alt_port_dpll);
4691 }
4692 
4693 void intel_dpll_state_verify(struct intel_atomic_state *state,
4694 			     struct intel_crtc *crtc)
4695 {
4696 	struct intel_display *display = to_intel_display(state);
4697 	const struct intel_crtc_state *old_crtc_state =
4698 		intel_atomic_get_old_crtc_state(state, crtc);
4699 	const struct intel_crtc_state *new_crtc_state =
4700 		intel_atomic_get_new_crtc_state(state, crtc);
4701 
4702 	if (new_crtc_state->intel_dpll)
4703 		verify_single_dpll_state(display, new_crtc_state->intel_dpll,
4704 					 crtc, new_crtc_state);
4705 
4706 	if (old_crtc_state->intel_dpll &&
4707 	    old_crtc_state->intel_dpll != new_crtc_state->intel_dpll) {
4708 		u8 pipe_mask = BIT(crtc->pipe);
4709 		struct intel_dpll *pll = old_crtc_state->intel_dpll;
4710 
4711 		INTEL_DISPLAY_STATE_WARN(display, pll->active_mask & pipe_mask,
4712 					 "%s: pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
4713 					 pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
4714 
4715 		/* TC ports have both MG/TC and TBT PLL referenced simultaneously */
4716 		INTEL_DISPLAY_STATE_WARN(display, !has_alt_port_dpll(old_crtc_state->intel_dpll,
4717 								     new_crtc_state->intel_dpll) &&
4718 					 pll->state.pipe_mask & pipe_mask,
4719 					 "%s: pll enabled crtcs mismatch (found pipe %c in enabled mask (0x%x))\n",
4720 					 pll->info->name, pipe_name(crtc->pipe), pll->state.pipe_mask);
4721 	}
4722 }
4723 
4724 void intel_dpll_verify_disabled(struct intel_atomic_state *state)
4725 {
4726 	struct intel_display *display = to_intel_display(state);
4727 	struct intel_dpll *pll;
4728 	int i;
4729 
4730 	for_each_dpll(display, pll, i)
4731 		verify_single_dpll_state(display, pll, NULL, NULL);
4732 }
4733