xref: /linux/drivers/gpu/drm/i915/display/intel_dpll_mgr.c (revision 25489a4f556414445d342951615178368ee45cde)
1 /*
2  * Copyright © 2006-2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <linux/math.h>
25 #include <linux/string_helpers.h>
26 
27 #include <drm/drm_print.h>
28 
29 #include "bxt_dpio_phy_regs.h"
30 #include "i915_reg.h"
31 #include "i915_utils.h"
32 #include "intel_cx0_phy.h"
33 #include "intel_de.h"
34 #include "intel_display_types.h"
35 #include "intel_dkl_phy.h"
36 #include "intel_dkl_phy_regs.h"
37 #include "intel_dpio_phy.h"
38 #include "intel_dpll.h"
39 #include "intel_dpll_mgr.h"
40 #include "intel_hti.h"
41 #include "intel_mg_phy_regs.h"
42 #include "intel_pch_refclk.h"
43 #include "intel_step.h"
44 #include "intel_tc.h"
45 
46 /**
47  * DOC: Display PLLs
48  *
49  * Display PLLs used for driving outputs vary by platform. While some have
50  * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
51  * from a pool. In the latter scenario, it is possible that multiple pipes
52  * share a PLL if their configurations match.
53  *
54  * This file provides an abstraction over display PLLs. The function
55  * intel_shared_dpll_init() initializes the PLLs for the given platform.  The
56  * users of a PLL are tracked and that tracking is integrated with the atomic
57  * modset interface. During an atomic operation, required PLLs can be reserved
58  * for a given CRTC and encoder configuration by calling
59  * intel_reserve_shared_dplls() and previously reserved PLLs can be released
60  * with intel_release_shared_dplls().
61  * Changes to the users are first staged in the atomic state, and then made
62  * effective by calling intel_shared_dpll_swap_state() during the atomic
63  * commit phase.
64  */
65 
66 /* platform specific hooks for managing DPLLs */
67 struct intel_shared_dpll_funcs {
68 	/*
69 	 * Hook for enabling the pll, called from intel_enable_shared_dpll() if
70 	 * the pll is not already enabled.
71 	 */
72 	void (*enable)(struct intel_display *display,
73 		       struct intel_shared_dpll *pll,
74 		       const struct intel_dpll_hw_state *dpll_hw_state);
75 
76 	/*
77 	 * Hook for disabling the pll, called from intel_disable_shared_dpll()
78 	 * only when it is safe to disable the pll, i.e., there are no more
79 	 * tracked users for it.
80 	 */
81 	void (*disable)(struct intel_display *display,
82 			struct intel_shared_dpll *pll);
83 
84 	/*
85 	 * Hook for reading the values currently programmed to the DPLL
86 	 * registers. This is used for initial hw state readout and state
87 	 * verification after a mode set.
88 	 */
89 	bool (*get_hw_state)(struct intel_display *display,
90 			     struct intel_shared_dpll *pll,
91 			     struct intel_dpll_hw_state *dpll_hw_state);
92 
93 	/*
94 	 * Hook for calculating the pll's output frequency based on its passed
95 	 * in state.
96 	 */
97 	int (*get_freq)(struct intel_display *i915,
98 			const struct intel_shared_dpll *pll,
99 			const struct intel_dpll_hw_state *dpll_hw_state);
100 };
101 
/* Per-platform-family DPLL management: PLL pool description plus vfuncs. */
struct intel_dpll_mgr {
	/* table of the platform's PLLs, terminated by an empty entry */
	const struct dpll_info *dpll_info;

	/* compute the PLL state needed for @crtc/@encoder into the atomic state */
	int (*compute_dplls)(struct intel_atomic_state *state,
			     struct intel_crtc *crtc,
			     struct intel_encoder *encoder);
	/* reserve the PLL(s) for @crtc/@encoder in the atomic state */
	int (*get_dplls)(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder);
	/* drop @crtc's PLL reservation(s) from the atomic state */
	void (*put_dplls)(struct intel_atomic_state *state,
			  struct intel_crtc *crtc);
	/* optional: pick the active PLL among already reserved ones (TC ports) */
	void (*update_active_dpll)(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_encoder *encoder);
	/* optional: refresh the cached reference clock frequencies */
	void (*update_ref_clks)(struct intel_display *display);
	/* dump @dpll_hw_state for debugging */
	void (*dump_hw_state)(struct drm_printer *p,
			      const struct intel_dpll_hw_state *dpll_hw_state);
	/* compare two hw states for equality (state verification) */
	bool (*compare_hw_state)(const struct intel_dpll_hw_state *a,
				 const struct intel_dpll_hw_state *b);
};
122 
/*
 * Snapshot the current state of every shared DPLL into the atomic state's
 * @shared_dpll array (indexed by pll->index), so the atomic state can be
 * modified without touching the committed state.
 */
static void
intel_atomic_duplicate_dpll_state(struct intel_display *display,
				  struct intel_shared_dpll_state *shared_dpll)
{
	struct intel_shared_dpll *pll;
	int i;

	/* Copy shared dpll state */
	for_each_shared_dpll(display, pll, i)
		shared_dpll[pll->index] = pll->state;
}
134 
/*
 * Return the DPLL state array of @s, duplicating the committed PLL state
 * into it on first access (lazily, once per atomic state).
 *
 * Requires connection_mutex to be held, since the duplicated state is
 * shared global state.
 */
static struct intel_shared_dpll_state *
intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
{
	struct intel_atomic_state *state = to_intel_atomic_state(s);
	struct intel_display *display = to_intel_display(state);

	drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));

	if (!state->dpll_set) {
		state->dpll_set = true;

		/* first access: populate from the committed PLL state */
		intel_atomic_duplicate_dpll_state(display,
						  state->shared_dpll);
	}

	return state->shared_dpll;
}
152 
153 /**
154  * intel_get_shared_dpll_by_id - get a DPLL given its id
155  * @display: intel_display device instance
156  * @id: pll id
157  *
158  * Returns:
159  * A pointer to the DPLL with @id
160  */
161 struct intel_shared_dpll *
162 intel_get_shared_dpll_by_id(struct intel_display *display,
163 			    enum intel_dpll_id id)
164 {
165 	struct intel_shared_dpll *pll;
166 	int i;
167 
168 	for_each_shared_dpll(display, pll, i) {
169 		if (pll->info->id == id)
170 			return pll;
171 	}
172 
173 	MISSING_CASE(id);
174 	return NULL;
175 }
176 
/*
 * For ILK+.
 * Assert that @pll's current hardware enable state matches @state,
 * warning (via INTEL_DISPLAY_STATE_WARN) on mismatch or if @pll is NULL.
 */
void assert_shared_dpll(struct intel_display *display,
			struct intel_shared_dpll *pll,
			bool state)
{
	bool cur_state;
	struct intel_dpll_hw_state hw_state;

	if (drm_WARN(display->drm, !pll,
		     "asserting DPLL %s with no DPLL\n", str_on_off(state)))
		return;

	/* readout from hardware, not from our cached software state */
	cur_state = intel_dpll_get_hw_state(display, pll, &hw_state);
	INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
				 "%s assertion failure (expected %s, current %s)\n",
				 pll->info->name, str_on_off(state),
				 str_on_off(cur_state));
}
195 
196 static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
197 {
198 	return TC_PORT_1 + id - DPLL_ID_ICL_MGPLL1;
199 }
200 
201 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
202 {
203 	return tc_port - TC_PORT_1 + DPLL_ID_ICL_MGPLL1;
204 }
205 
/*
 * Return the enable register for a combo PHY PLL. The register location
 * varies by platform; notably on JSL/EHL DPLL4 reuses the MG PLL 1 enable
 * register.
 */
static i915_reg_t
intel_combo_pll_enable_reg(struct intel_display *display,
			   struct intel_shared_dpll *pll)
{
	if (display->platform.dg1)
		return DG1_DPLL_ENABLE(pll->info->id);
	else if ((display->platform.jasperlake || display->platform.elkhartlake) &&
		 (pll->info->id == DPLL_ID_EHL_DPLL4))
		return MG_PLL_ENABLE(0);

	return ICL_DPLL_ENABLE(pll->info->id);
}
218 
/*
 * Return the enable register for a Type-C port PLL, derived from the
 * PLL id's corresponding TC port.
 */
static i915_reg_t
intel_tc_pll_enable_reg(struct intel_display *display,
			struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);

	if (display->platform.alderlake_p)
		return ADLP_PORTTC_PLL_ENABLE(tc_port);

	return MG_PLL_ENABLE(tc_port);
}
231 
/*
 * Actually power up @pll: grab its power domain reference (if it has one)
 * and call the platform enable hook. Caller must hold display->dpll.lock.
 */
static void _intel_enable_shared_dpll(struct intel_display *display,
				      struct intel_shared_dpll *pll)
{
	if (pll->info->power_domain)
		pll->wakeref = intel_display_power_get(display, pll->info->power_domain);

	pll->info->funcs->enable(display, pll, &pll->state.hw_state);
	pll->on = true;
}
241 
/*
 * Actually power down @pll: call the platform disable hook, then release
 * the power domain reference taken at enable time (if any). Caller must
 * hold display->dpll.lock.
 */
static void _intel_disable_shared_dpll(struct intel_display *display,
				       struct intel_shared_dpll *pll)
{
	pll->info->funcs->disable(display, pll);
	pll->on = false;

	if (pll->info->power_domain)
		intel_display_power_put(display, pll->info->power_domain, pll->wakeref);
}
251 
252 /**
253  * intel_enable_shared_dpll - enable a CRTC's shared DPLL
254  * @crtc_state: CRTC, and its state, which has a shared DPLL
255  *
256  * Enable the shared DPLL used by @crtc.
257  */
258 void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
259 {
260 	struct intel_display *display = to_intel_display(crtc_state);
261 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
262 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
263 	unsigned int pipe_mask = intel_crtc_joined_pipe_mask(crtc_state);
264 	unsigned int old_mask;
265 
266 	if (drm_WARN_ON(display->drm, !pll))
267 		return;
268 
269 	mutex_lock(&display->dpll.lock);
270 	old_mask = pll->active_mask;
271 
272 	if (drm_WARN_ON(display->drm, !(pll->state.pipe_mask & pipe_mask)) ||
273 	    drm_WARN_ON(display->drm, pll->active_mask & pipe_mask))
274 		goto out;
275 
276 	pll->active_mask |= pipe_mask;
277 
278 	drm_dbg_kms(display->drm,
279 		    "enable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
280 		    pll->info->name, pll->active_mask, pll->on,
281 		    crtc->base.base.id, crtc->base.name);
282 
283 	if (old_mask) {
284 		drm_WARN_ON(display->drm, !pll->on);
285 		assert_shared_dpll_enabled(display, pll);
286 		goto out;
287 	}
288 	drm_WARN_ON(display->drm, pll->on);
289 
290 	drm_dbg_kms(display->drm, "enabling %s\n", pll->info->name);
291 
292 	_intel_enable_shared_dpll(display, pll);
293 
294 out:
295 	mutex_unlock(&display->dpll.lock);
296 }
297 
298 /**
299  * intel_disable_shared_dpll - disable a CRTC's shared DPLL
300  * @crtc_state: CRTC, and its state, which has a shared DPLL
301  *
302  * Disable the shared DPLL used by @crtc.
303  */
304 void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
305 {
306 	struct intel_display *display = to_intel_display(crtc_state);
307 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
308 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
309 	unsigned int pipe_mask = intel_crtc_joined_pipe_mask(crtc_state);
310 
311 	/* PCH only available on ILK+ */
312 	if (DISPLAY_VER(display) < 5)
313 		return;
314 
315 	if (pll == NULL)
316 		return;
317 
318 	mutex_lock(&display->dpll.lock);
319 	if (drm_WARN(display->drm, !(pll->active_mask & pipe_mask),
320 		     "%s not used by [CRTC:%d:%s]\n", pll->info->name,
321 		     crtc->base.base.id, crtc->base.name))
322 		goto out;
323 
324 	drm_dbg_kms(display->drm,
325 		    "disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
326 		    pll->info->name, pll->active_mask, pll->on,
327 		    crtc->base.base.id, crtc->base.name);
328 
329 	assert_shared_dpll_enabled(display, pll);
330 	drm_WARN_ON(display->drm, !pll->on);
331 
332 	pll->active_mask &= ~pipe_mask;
333 	if (pll->active_mask)
334 		goto out;
335 
336 	drm_dbg_kms(display->drm, "disabling %s\n", pll->info->name);
337 
338 	_intel_disable_shared_dpll(display, pll);
339 
340 out:
341 	mutex_unlock(&display->dpll.lock);
342 }
343 
344 static unsigned long
345 intel_dpll_mask_all(struct intel_display *display)
346 {
347 	struct intel_shared_dpll *pll;
348 	unsigned long dpll_mask = 0;
349 	int i;
350 
351 	for_each_shared_dpll(display, pll, i) {
352 		drm_WARN_ON(display->drm, dpll_mask & BIT(pll->info->id));
353 
354 		dpll_mask |= BIT(pll->info->id);
355 	}
356 
357 	return dpll_mask;
358 }
359 
/*
 * Find a PLL for @crtc among @dpll_mask: prefer one whose already-reserved
 * hw state matches @dpll_hw_state exactly (so it can be shared), otherwise
 * fall back to the first completely unused PLL. Returns NULL if neither
 * exists.
 */
static struct intel_shared_dpll *
intel_find_shared_dpll(struct intel_atomic_state *state,
		       const struct intel_crtc *crtc,
		       const struct intel_dpll_hw_state *dpll_hw_state,
		       unsigned long dpll_mask)
{
	struct intel_display *display = to_intel_display(crtc);
	unsigned long dpll_mask_all = intel_dpll_mask_all(display);
	struct intel_shared_dpll_state *shared_dpll;
	struct intel_shared_dpll *unused_pll = NULL;
	enum intel_dpll_id id;

	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);

	/* the candidate mask must not name PLLs the platform doesn't have */
	drm_WARN_ON(display->drm, dpll_mask & ~dpll_mask_all);

	for_each_set_bit(id, &dpll_mask, fls(dpll_mask_all)) {
		struct intel_shared_dpll *pll;

		pll = intel_get_shared_dpll_by_id(display, id);
		if (!pll)
			continue;

		/* Only want to check enabled timings first */
		if (shared_dpll[pll->index].pipe_mask == 0) {
			/* remember the first free PLL as a fallback */
			if (!unused_pll)
				unused_pll = pll;
			continue;
		}

		/* in-use PLL: can only share it if the hw state matches exactly */
		if (memcmp(dpll_hw_state,
			   &shared_dpll[pll->index].hw_state,
			   sizeof(*dpll_hw_state)) == 0) {
			drm_dbg_kms(display->drm,
				    "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
				    crtc->base.base.id, crtc->base.name,
				    pll->info->name,
				    shared_dpll[pll->index].pipe_mask,
				    pll->active_mask);
			return pll;
		}
	}

	/* Ok no matching timings, maybe there's a free one? */
	if (unused_pll) {
		drm_dbg_kms(display->drm, "[CRTC:%d:%s] allocated %s\n",
			    crtc->base.base.id, crtc->base.name,
			    unused_pll->info->name);
		return unused_pll;
	}

	return NULL;
}
413 
414 /**
415  * intel_reference_shared_dpll_crtc - Get a DPLL reference for a CRTC
416  * @crtc: CRTC on which behalf the reference is taken
417  * @pll: DPLL for which the reference is taken
418  * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
419  *
420  * Take a reference for @pll tracking the use of it by @crtc.
421  */
422 static void
423 intel_reference_shared_dpll_crtc(const struct intel_crtc *crtc,
424 				 const struct intel_shared_dpll *pll,
425 				 struct intel_shared_dpll_state *shared_dpll_state)
426 {
427 	struct intel_display *display = to_intel_display(crtc);
428 
429 	drm_WARN_ON(display->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) != 0);
430 
431 	shared_dpll_state->pipe_mask |= BIT(crtc->pipe);
432 
433 	drm_dbg_kms(display->drm, "[CRTC:%d:%s] reserving %s\n",
434 		    crtc->base.base.id, crtc->base.name, pll->info->name);
435 }
436 
/*
 * Reserve @pll for @crtc in the atomic state, installing @dpll_hw_state as
 * the PLL's state if @crtc is its first user. (Later users must match this
 * hw state exactly; see intel_find_shared_dpll().)
 */
static void
intel_reference_shared_dpll(struct intel_atomic_state *state,
			    const struct intel_crtc *crtc,
			    const struct intel_shared_dpll *pll,
			    const struct intel_dpll_hw_state *dpll_hw_state)
{
	struct intel_shared_dpll_state *shared_dpll;

	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);

	/* first user defines the PLL's hw state */
	if (shared_dpll[pll->index].pipe_mask == 0)
		shared_dpll[pll->index].hw_state = *dpll_hw_state;

	intel_reference_shared_dpll_crtc(crtc, pll, &shared_dpll[pll->index]);
}
452 
453 /**
454  * intel_unreference_shared_dpll_crtc - Drop a DPLL reference for a CRTC
455  * @crtc: CRTC on which behalf the reference is dropped
456  * @pll: DPLL for which the reference is dropped
457  * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
458  *
459  * Drop a reference for @pll tracking the end of use of it by @crtc.
460  */
461 void
462 intel_unreference_shared_dpll_crtc(const struct intel_crtc *crtc,
463 				   const struct intel_shared_dpll *pll,
464 				   struct intel_shared_dpll_state *shared_dpll_state)
465 {
466 	struct intel_display *display = to_intel_display(crtc);
467 
468 	drm_WARN_ON(display->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) == 0);
469 
470 	shared_dpll_state->pipe_mask &= ~BIT(crtc->pipe);
471 
472 	drm_dbg_kms(display->drm, "[CRTC:%d:%s] releasing %s\n",
473 		    crtc->base.base.id, crtc->base.name, pll->info->name);
474 }
475 
/* Drop @crtc's reservation of @pll from the atomic state. */
static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
					  const struct intel_crtc *crtc,
					  const struct intel_shared_dpll *pll)
{
	struct intel_shared_dpll_state *shared_dpll;

	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);

	intel_unreference_shared_dpll_crtc(crtc, pll, &shared_dpll[pll->index]);
}
486 
/*
 * Release @crtc's PLL: clear it from the new crtc state and drop the old
 * state's reservation (if there was one). Used as the .put_dplls hook for
 * platforms with a simple one-PLL-per-CRTC scheme.
 */
static void intel_put_dpll(struct intel_atomic_state *state,
			   struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	new_crtc_state->shared_dpll = NULL;

	if (!old_crtc_state->shared_dpll)
		return;

	intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
}
502 
503 /**
504  * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
505  * @state: atomic state
506  *
507  * This is the dpll version of drm_atomic_helper_swap_state() since the
508  * helper does not handle driver-specific global state.
509  *
510  * For consistency with atomic helpers this function does a complete swap,
511  * i.e. it also puts the current state into @state, even though there is no
512  * need for that at this moment.
513  */
514 void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
515 {
516 	struct intel_display *display = to_intel_display(state);
517 	struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
518 	struct intel_shared_dpll *pll;
519 	int i;
520 
521 	if (!state->dpll_set)
522 		return;
523 
524 	for_each_shared_dpll(display, pll, i)
525 		swap(pll->state, shared_dpll[pll->index]);
526 }
527 
/*
 * Read back the PCH DPLL registers into @dpll_hw_state.
 * Returns true if the PLL is currently enabled, false if it's disabled or
 * the display core power domain couldn't be acquired.
 */
static bool ibx_pch_dpll_get_hw_state(struct intel_display *display,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *dpll_hw_state)
{
	struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;

	/* registers are only accessible with the power domain enabled */
	wakeref = intel_display_power_get_if_enabled(display,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(display, PCH_DPLL(id));
	hw_state->dpll = val;
	hw_state->fp0 = intel_de_read(display, PCH_FP0(id));
	hw_state->fp1 = intel_de_read(display, PCH_FP1(id));

	intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & DPLL_VCO_ENABLE;
}
551 
/* Warn if no PCH reference clock source is enabled in PCH_DREF_CONTROL. */
static void ibx_assert_pch_refclk_enabled(struct intel_display *display)
{
	u32 val;
	bool enabled;

	val = intel_de_read(display, PCH_DREF_CONTROL);
	/* any of the three source fields being non-zero counts as enabled */
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
			    DREF_SUPERSPREAD_SOURCE_MASK));
	INTEL_DISPLAY_STATE_WARN(display, !enabled,
				 "PCH refclk assertion failure, should be active but is disabled\n");
}
563 
/*
 * Program and enable a PCH DPLL from @dpll_hw_state. Note the double write
 * of the DPLL register: the pixel multiplier only latches once the PLL is
 * already enabled and stable.
 */
static void ibx_pch_dpll_enable(struct intel_display *display,
				struct intel_shared_dpll *pll,
				const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
	const enum intel_dpll_id id = pll->info->id;

	/* PCH refclock must be enabled first */
	ibx_assert_pch_refclk_enabled(display);

	/* divisors must be programmed before enabling the PLL */
	intel_de_write(display, PCH_FP0(id), hw_state->fp0);
	intel_de_write(display, PCH_FP1(id), hw_state->fp1);

	intel_de_write(display, PCH_DPLL(id), hw_state->dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(display, PCH_DPLL(id));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(display, PCH_DPLL(id), hw_state->dpll);
	intel_de_posting_read(display, PCH_DPLL(id));
	udelay(200);
}
592 
/* Disable a PCH DPLL by clearing its control register entirely. */
static void ibx_pch_dpll_disable(struct intel_display *display,
				 struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(display, PCH_DPLL(id), 0);
	/* posting read + delay to let the PLL actually shut down */
	intel_de_posting_read(display, PCH_DPLL(id));
	udelay(200);
}
602 
/*
 * Nothing to compute up front for PCH DPLLs; the dpll_hw_state is
 * presumably filled in elsewhere (TODO: confirm against the ILK clock
 * computation code), so this hook only exists to satisfy the interface.
 */
static int ibx_compute_dpll(struct intel_atomic_state *state,
			    struct intel_crtc *crtc,
			    struct intel_encoder *encoder)
{
	return 0;
}
609 
/*
 * Reserve a PCH DPLL for @crtc. On IBX the mapping is fixed (pipe == PLL),
 * on CPT any of the two PLLs can be picked/shared. Returns 0 on success,
 * -EINVAL if no PLL could be obtained.
 */
static int ibx_get_dpll(struct intel_atomic_state *state,
			struct intel_crtc *crtc,
			struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_shared_dpll *pll;
	enum intel_dpll_id id;

	if (HAS_PCH_IBX(display)) {
		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
		id = (enum intel_dpll_id) crtc->pipe;
		pll = intel_get_shared_dpll_by_id(display, id);

		drm_dbg_kms(display->drm,
			    "[CRTC:%d:%s] using pre-allocated %s\n",
			    crtc->base.base.id, crtc->base.name,
			    pll->info->name);
	} else {
		/* CPT: pick a matching or free PLL from the two-PLL pool */
		pll = intel_find_shared_dpll(state, crtc,
					     &crtc_state->dpll_hw_state,
					     BIT(DPLL_ID_PCH_PLL_B) |
					     BIT(DPLL_ID_PCH_PLL_A));
	}

	if (!pll)
		return -EINVAL;

	/* reference the pll */
	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return 0;
}
647 
/* Print the i9xx-style PLL hw state fields for debugging. */
static void ibx_dump_hw_state(struct drm_printer *p,
			      const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;

	drm_printf(p, "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
		   "fp0: 0x%x, fp1: 0x%x\n",
		   hw_state->dpll,
		   hw_state->dpll_md,
		   hw_state->fp0,
		   hw_state->fp1);
}
660 
661 static bool ibx_compare_hw_state(const struct intel_dpll_hw_state *_a,
662 				 const struct intel_dpll_hw_state *_b)
663 {
664 	const struct i9xx_dpll_hw_state *a = &_a->i9xx;
665 	const struct i9xx_dpll_hw_state *b = &_b->i9xx;
666 
667 	return a->dpll == b->dpll &&
668 		a->dpll_md == b->dpll_md &&
669 		a->fp0 == b->fp0 &&
670 		a->fp1 == b->fp1;
671 }
672 
/* PLL enable/disable/readout ops shared by both PCH DPLLs */
static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
	.enable = ibx_pch_dpll_enable,
	.disable = ibx_pch_dpll_disable,
	.get_hw_state = ibx_pch_dpll_get_hw_state,
};
678 
679 static const struct dpll_info pch_plls[] = {
680 	{ .name = "PCH DPLL A", .funcs = &ibx_pch_dpll_funcs, .id = DPLL_ID_PCH_PLL_A, },
681 	{ .name = "PCH DPLL B", .funcs = &ibx_pch_dpll_funcs, .id = DPLL_ID_PCH_PLL_B, },
682 	{}
683 };
684 
/* DPLL manager for PCH (IBX/CPT) platforms */
static const struct intel_dpll_mgr pch_pll_mgr = {
	.dpll_info = pch_plls,
	.compute_dplls = ibx_compute_dpll,
	.get_dplls = ibx_get_dpll,
	.put_dplls = intel_put_dpll,
	.dump_hw_state = ibx_dump_hw_state,
	.compare_hw_state = ibx_compare_hw_state,
};
693 
/* Program and enable a HSW/BDW WRPLL from the precomputed control value. */
static void hsw_ddi_wrpll_enable(struct intel_display *display,
				 struct intel_shared_dpll *pll,
				 const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(display, WRPLL_CTL(id), hw_state->wrpll);
	/* posting read + short delay to let the PLL spin up */
	intel_de_posting_read(display, WRPLL_CTL(id));
	udelay(20);
}
705 
/* Program and enable the HSW/BDW SPLL (single instance, no per-id reg). */
static void hsw_ddi_spll_enable(struct intel_display *display,
				struct intel_shared_dpll *pll,
				const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;

	intel_de_write(display, SPLL_CTL, hw_state->spll);
	/* posting read + short delay to let the PLL spin up */
	intel_de_posting_read(display, SPLL_CTL);
	udelay(20);
}
716 
/* Disable a WRPLL by clearing its enable bit. */
static void hsw_ddi_wrpll_disable(struct intel_display *display,
				  struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_rmw(display, WRPLL_CTL(id), WRPLL_PLL_ENABLE, 0);
	intel_de_posting_read(display, WRPLL_CTL(id));

	/*
	 * Try to set up the PCH reference clock once all DPLLs
	 * that depend on it have been shut down.
	 */
	if (display->dpll.pch_ssc_use & BIT(id))
		intel_init_pch_refclk(display);
}
732 
733 static void hsw_ddi_spll_disable(struct intel_display *display,
734 				 struct intel_shared_dpll *pll)
735 {
736 	enum intel_dpll_id id = pll->info->id;
737 
738 	intel_de_rmw(display, SPLL_CTL, SPLL_PLL_ENABLE, 0);
739 	intel_de_posting_read(display, SPLL_CTL);
740 
741 	/*
742 	 * Try to set up the PCH reference clock once all DPLLs
743 	 * that depend on it have been shut down.
744 	 */
745 	if (display->dpll.pch_ssc_use & BIT(id))
746 		intel_init_pch_refclk(display);
747 }
748 
/*
 * Read back the WRPLL control register into @dpll_hw_state.
 * Returns true if the PLL is enabled, false if disabled or the display
 * core power domain couldn't be acquired.
 */
static bool hsw_ddi_wrpll_get_hw_state(struct intel_display *display,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *dpll_hw_state)
{
	struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(display,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(display, WRPLL_CTL(id));
	hw_state->wrpll = val;

	intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & WRPLL_PLL_ENABLE;
}
770 
/*
 * Read back the SPLL control register into @dpll_hw_state.
 * Returns true if the PLL is enabled, false if disabled or the display
 * core power domain couldn't be acquired.
 */
static bool hsw_ddi_spll_get_hw_state(struct intel_display *display,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *dpll_hw_state)
{
	struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
	intel_wakeref_t wakeref;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(display,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(display, SPLL_CTL);
	hw_state->spll = val;

	intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & SPLL_PLL_ENABLE;
}
791 
/* LCPLL reference frequency (in MHz, presumably - matches the 540 MHz
 * bypass case in hsw_ddi_calculate_wrpll()) */
#define LC_FREQ 2700
/* LC_FREQ scaled into the "2 kHz" units used by the WRPLL calculations */
#define LC_FREQ_2K U64_C(LC_FREQ * 2000)

/* post divider (P) search range; stepping by 2 yields even values only */
#define P_MIN 2
#define P_MAX 64
#define P_INC 2

/* Constraints for PLL good behavior */
#define REF_MIN 48
#define REF_MAX 400
#define VCO_MIN 2400
#define VCO_MAX 4800

/* candidate WRPLL dividers; n2/r2 are 2*N and 2*R to avoid fractions */
struct hsw_wrpll_rnp {
	unsigned p, n2, r2;
};
808 
/*
 * Return the PPM error budget for a given pixel clock (in Hz). Well-known
 * frequencies get specially tuned budgets; everything else uses 1000 ppm.
 */
static unsigned hsw_wrpll_get_budget_for_freq(int clock)
{
	static const int budget_0[] = {
		25175000, 25200000, 27000000, 27027000, 37762500, 37800000,
		40500000, 40541000, 54000000, 54054000, 59341000, 59400000,
		72000000, 74176000, 74250000, 81000000, 81081000, 89012000,
		89100000, 108000000, 108108000, 111264000, 111375000,
		148352000, 148500000, 162000000, 162162000, 222525000,
		222750000, 296703000, 297000000,
	};
	static const int budget_1500[] = {
		233500000, 245250000, 247750000, 253250000, 298000000,
	};
	static const int budget_2000[] = {
		169128000, 169500000, 179500000, 202000000,
	};
	static const int budget_4000[] = {
		256250000, 262500000, 270000000, 272500000, 273750000,
		280750000, 281250000, 286000000, 291750000,
	};
	static const int budget_5000[] = {
		267250000, 268500000,
	};
	static const struct {
		const int *freqs;
		size_t num_freqs;
		unsigned budget;
	} classes[] = {
		{ budget_0, sizeof(budget_0) / sizeof(budget_0[0]), 0 },
		{ budget_1500, sizeof(budget_1500) / sizeof(budget_1500[0]), 1500 },
		{ budget_2000, sizeof(budget_2000) / sizeof(budget_2000[0]), 2000 },
		{ budget_4000, sizeof(budget_4000) / sizeof(budget_4000[0]), 4000 },
		{ budget_5000, sizeof(budget_5000) / sizeof(budget_5000[0]), 5000 },
	};
	size_t i, j;

	for (i = 0; i < sizeof(classes) / sizeof(classes[0]); i++) {
		for (j = 0; j < classes[i].num_freqs; j++) {
			if (classes[i].freqs[j] == clock)
				return classes[i].budget;
		}
	}

	/* anything not listed above gets the default budget */
	return 1000;
}
872 
/*
 * Consider the divider candidate (r2, n2, p) for the target frequency
 * @freq2k (in 2 kHz units) against the current best in *best, and update
 * *best in place if the candidate is preferable under the PPM @budget.
 * All comparisons are done with cross-multiplied integers to avoid
 * division.
 */
static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
				 unsigned int r2, unsigned int n2,
				 unsigned int p,
				 struct hsw_wrpll_rnp *best)
{
	u64 a, b, c, d, diff, diff_best;

	/* No best (r,n,p) yet */
	if (best->p == 0) {
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
		return;
	}

	/*
	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
	 * freq2k.
	 *
	 * delta = 1e6 *
	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
	 *	   freq2k;
	 *
	 * and we would like delta <= budget.
	 *
	 * If the discrepancy is above the PPM-based budget, always prefer to
	 * improve upon the previous solution.  However, if you're within the
	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
	 */
	a = freq2k * budget * p * r2;
	b = freq2k * budget * best->p * best->r2;
	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
	diff_best = abs_diff(freq2k * best->p * best->r2,
			     LC_FREQ_2K * best->n2);
	c = 1000000 * diff;
	d = 1000000 * diff_best;

	/* a >= c means candidate is within budget; b >= d means best is */
	if (a < c && b < d) {
		/* If both are above the budget, pick the closer */
		if (best->p * best->r2 * diff < p * r2 * diff_best) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	} else if (a >= c && b < d) {
		/* If A is below the threshold but B is above it?  Update. */
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
	} else if (a >= c && b >= d) {
		/* Both are below the limit, so pick the higher n2/(r2*r2) */
		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	}
	/* Otherwise a < c && b >= d, do nothing */
}
932 
/*
 * Exhaustively search (r2, n2, p) divider triples for the WRPLL that best
 * hit @clock, subject to the REF_MIN/MAX and VCO_MIN/MAX constraints and
 * the clock's PPM budget. Results are returned via the out parameters as
 * doubled dividers (2*R, 2*N) plus the post divider P.
 */
static void
hsw_ddi_calculate_wrpll(int clock /* in Hz */,
			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
{
	u64 freq2k;
	unsigned p, n2, r2;
	struct hsw_wrpll_rnp best = {};
	unsigned budget;

	/* target in 2 kHz units */
	freq2k = clock / 100;

	budget = hsw_wrpll_get_budget_for_freq(clock);

	/* Special case handling for 540 pixel clock: bypass WR PLL entirely
	 * and directly pass the LC PLL to it. */
	if (freq2k == 5400000) {
		*n2_out = 2;
		*p_out = 1;
		*r2_out = 2;
		return;
	}

	/*
	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
	 * the WR PLL.
	 *
	 * We want R so that REF_MIN <= Ref <= REF_MAX.
	 * Injecting R2 = 2 * R gives:
	 *   REF_MAX * r2 > LC_FREQ * 2 and
	 *   REF_MIN * r2 < LC_FREQ * 2
	 *
	 * Which means the desired boundaries for r2 are:
	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
	 *
	 */
	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
	     r2 <= LC_FREQ * 2 / REF_MIN;
	     r2++) {

		/*
		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
		 *
		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
		 *   VCO_MAX * r2 > n2 * LC_FREQ and
		 *   VCO_MIN * r2 < n2 * LC_FREQ)
		 *
		 * Which means the desired boundaries for n2 are:
		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
		 */
		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
		     n2 <= VCO_MAX * r2 / LC_FREQ;
		     n2++) {

			for (p = P_MIN; p <= P_MAX; p += P_INC)
				hsw_wrpll_update_rnp(freq2k, budget,
						     r2, n2, p, &best);
		}
	}

	*n2_out = best.n2;
	*p_out = best.p;
	*r2_out = best.r2;
}
997 
/*
 * Compute the WRPLL output frequency from the register value in
 * @dpll_hw_state, taking the reference clock selection into account.
 * Returns 0 if the reference selection is unrecognized.
 */
static int hsw_ddi_wrpll_get_freq(struct intel_display *display,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
	int refclk;
	int n, p, r;
	u32 wrpll = hw_state->wrpll;

	switch (wrpll & WRPLL_REF_MASK) {
	case WRPLL_REF_SPECIAL_HSW:
		/* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
		if (display->platform.haswell && !display->platform.haswell_ult) {
			refclk = display->dpll.ref_clks.nssc;
			break;
		}
		fallthrough;
	case WRPLL_REF_PCH_SSC:
		/*
		 * We could calculate spread here, but our checking
		 * code only cares about 5% accuracy, and spread is a max of
		 * 0.5% downspread.
		 */
		refclk = display->dpll.ref_clks.ssc;
		break;
	case WRPLL_REF_LCPLL:
		refclk = 2700000;
		break;
	default:
		MISSING_CASE(wrpll);
		return 0;
	}

	/* extract the divider fields from the control register value */
	r = wrpll & WRPLL_DIVIDER_REF_MASK;
	p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
	n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;

	/* Convert to KHz, p & r have a fixed point portion */
	return (refclk * n / 10) / (p * r) * 2;
}
1038 
/*
 * Compute the WRPLL state for an HDMI output: derive the dividers for
 * the requested port clock and encode them into the WRPLL_CTL value.
 *
 * port_clock is rewritten with the frequency the chosen dividers
 * actually produce, which may differ slightly from the request.
 */
static int
hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state,
			   struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct hsw_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.hsw;
	unsigned int p, n2, r2;

	/* port_clock is in kHz; the calculator wants Hz. */
	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);

	hw_state->wrpll =
		WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
		WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
		WRPLL_DIVIDER_POST(p);

	/* pll is unused by hsw_ddi_wrpll_get_freq(), hence NULL here. */
	crtc_state->port_clock = hsw_ddi_wrpll_get_freq(display, NULL,
							&crtc_state->dpll_hw_state);

	return 0;
}
1061 
1062 static struct intel_shared_dpll *
1063 hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
1064 		       struct intel_crtc *crtc)
1065 {
1066 	struct intel_crtc_state *crtc_state =
1067 		intel_atomic_get_new_crtc_state(state, crtc);
1068 
1069 	return intel_find_shared_dpll(state, crtc,
1070 				      &crtc_state->dpll_hw_state,
1071 				      BIT(DPLL_ID_WRPLL2) |
1072 				      BIT(DPLL_ID_WRPLL1));
1073 }
1074 
1075 static int
1076 hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state)
1077 {
1078 	struct intel_display *display = to_intel_display(crtc_state);
1079 	int clock = crtc_state->port_clock;
1080 
1081 	switch (clock / 2) {
1082 	case 81000:
1083 	case 135000:
1084 	case 270000:
1085 		return 0;
1086 	default:
1087 		drm_dbg_kms(display->drm, "Invalid clock for DP: %d\n",
1088 			    clock);
1089 		return -EINVAL;
1090 	}
1091 }
1092 
1093 static struct intel_shared_dpll *
1094 hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
1095 {
1096 	struct intel_display *display = to_intel_display(crtc_state);
1097 	struct intel_shared_dpll *pll;
1098 	enum intel_dpll_id pll_id;
1099 	int clock = crtc_state->port_clock;
1100 
1101 	switch (clock / 2) {
1102 	case 81000:
1103 		pll_id = DPLL_ID_LCPLL_810;
1104 		break;
1105 	case 135000:
1106 		pll_id = DPLL_ID_LCPLL_1350;
1107 		break;
1108 	case 270000:
1109 		pll_id = DPLL_ID_LCPLL_2700;
1110 		break;
1111 	default:
1112 		MISSING_CASE(clock / 2);
1113 		return NULL;
1114 	}
1115 
1116 	pll = intel_get_shared_dpll_by_id(display, pll_id);
1117 
1118 	if (!pll)
1119 		return NULL;
1120 
1121 	return pll;
1122 }
1123 
1124 static int hsw_ddi_lcpll_get_freq(struct intel_display *display,
1125 				  const struct intel_shared_dpll *pll,
1126 				  const struct intel_dpll_hw_state *dpll_hw_state)
1127 {
1128 	int link_clock = 0;
1129 
1130 	switch (pll->info->id) {
1131 	case DPLL_ID_LCPLL_810:
1132 		link_clock = 81000;
1133 		break;
1134 	case DPLL_ID_LCPLL_1350:
1135 		link_clock = 135000;
1136 		break;
1137 	case DPLL_ID_LCPLL_2700:
1138 		link_clock = 270000;
1139 		break;
1140 	default:
1141 		drm_WARN(display->drm, 1, "bad port clock sel\n");
1142 		break;
1143 	}
1144 
1145 	return link_clock * 2;
1146 }
1147 
1148 static int
1149 hsw_ddi_spll_compute_dpll(struct intel_atomic_state *state,
1150 			  struct intel_crtc *crtc)
1151 {
1152 	struct intel_crtc_state *crtc_state =
1153 		intel_atomic_get_new_crtc_state(state, crtc);
1154 	struct hsw_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.hsw;
1155 
1156 	if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
1157 		return -EINVAL;
1158 
1159 	hw_state->spll =
1160 		SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
1161 
1162 	return 0;
1163 }
1164 
1165 static struct intel_shared_dpll *
1166 hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
1167 		      struct intel_crtc *crtc)
1168 {
1169 	struct intel_crtc_state *crtc_state =
1170 		intel_atomic_get_new_crtc_state(state, crtc);
1171 
1172 	return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
1173 				      BIT(DPLL_ID_SPLL));
1174 }
1175 
1176 static int hsw_ddi_spll_get_freq(struct intel_display *display,
1177 				 const struct intel_shared_dpll *pll,
1178 				 const struct intel_dpll_hw_state *dpll_hw_state)
1179 {
1180 	const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
1181 	int link_clock = 0;
1182 
1183 	switch (hw_state->spll & SPLL_FREQ_MASK) {
1184 	case SPLL_FREQ_810MHz:
1185 		link_clock = 81000;
1186 		break;
1187 	case SPLL_FREQ_1350MHz:
1188 		link_clock = 135000;
1189 		break;
1190 	case SPLL_FREQ_2700MHz:
1191 		link_clock = 270000;
1192 		break;
1193 	default:
1194 		drm_WARN(display->drm, 1, "bad spll freq\n");
1195 		break;
1196 	}
1197 
1198 	return link_clock * 2;
1199 }
1200 
1201 static int hsw_compute_dpll(struct intel_atomic_state *state,
1202 			    struct intel_crtc *crtc,
1203 			    struct intel_encoder *encoder)
1204 {
1205 	struct intel_crtc_state *crtc_state =
1206 		intel_atomic_get_new_crtc_state(state, crtc);
1207 
1208 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1209 		return hsw_ddi_wrpll_compute_dpll(state, crtc);
1210 	else if (intel_crtc_has_dp_encoder(crtc_state))
1211 		return hsw_ddi_lcpll_compute_dpll(crtc_state);
1212 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1213 		return hsw_ddi_spll_compute_dpll(state, crtc);
1214 	else
1215 		return -EINVAL;
1216 }
1217 
1218 static int hsw_get_dpll(struct intel_atomic_state *state,
1219 			struct intel_crtc *crtc,
1220 			struct intel_encoder *encoder)
1221 {
1222 	struct intel_crtc_state *crtc_state =
1223 		intel_atomic_get_new_crtc_state(state, crtc);
1224 	struct intel_shared_dpll *pll = NULL;
1225 
1226 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1227 		pll = hsw_ddi_wrpll_get_dpll(state, crtc);
1228 	else if (intel_crtc_has_dp_encoder(crtc_state))
1229 		pll = hsw_ddi_lcpll_get_dpll(crtc_state);
1230 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1231 		pll = hsw_ddi_spll_get_dpll(state, crtc);
1232 
1233 	if (!pll)
1234 		return -EINVAL;
1235 
1236 	intel_reference_shared_dpll(state, crtc,
1237 				    pll, &crtc_state->dpll_hw_state);
1238 
1239 	crtc_state->shared_dpll = pll;
1240 
1241 	return 0;
1242 }
1243 
1244 static void hsw_update_dpll_ref_clks(struct intel_display *display)
1245 {
1246 	display->dpll.ref_clks.ssc = 135000;
1247 	/* Non-SSC is only used on non-ULT HSW. */
1248 	if (intel_de_read(display, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
1249 		display->dpll.ref_clks.nssc = 24000;
1250 	else
1251 		display->dpll.ref_clks.nssc = 135000;
1252 }
1253 
1254 static void hsw_dump_hw_state(struct drm_printer *p,
1255 			      const struct intel_dpll_hw_state *dpll_hw_state)
1256 {
1257 	const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
1258 
1259 	drm_printf(p, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
1260 		   hw_state->wrpll, hw_state->spll);
1261 }
1262 
1263 static bool hsw_compare_hw_state(const struct intel_dpll_hw_state *_a,
1264 				 const struct intel_dpll_hw_state *_b)
1265 {
1266 	const struct hsw_dpll_hw_state *a = &_a->hsw;
1267 	const struct hsw_dpll_hw_state *b = &_b->hsw;
1268 
1269 	return a->wrpll == b->wrpll &&
1270 		a->spll == b->spll;
1271 }
1272 
/* Ops for the two HSW/BDW WRPLLs (used for HDMI outputs). */
static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
	.enable = hsw_ddi_wrpll_enable,
	.disable = hsw_ddi_wrpll_disable,
	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
	.get_freq = hsw_ddi_wrpll_get_freq,
};
1279 
/* Ops for the HSW SPLL (used for analog/CRT output). */
static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
	.enable = hsw_ddi_spll_enable,
	.disable = hsw_ddi_spll_disable,
	.get_hw_state = hsw_ddi_spll_get_hw_state,
	.get_freq = hsw_ddi_spll_get_freq,
};
1286 
/*
 * No-op: the LCPLLs are marked always-on in hsw_plls[], so there is
 * nothing to enable here.
 */
static void hsw_ddi_lcpll_enable(struct intel_display *display,
				 struct intel_shared_dpll *pll,
				 const struct intel_dpll_hw_state *hw_state)
{
}
1292 
/* No-op: the always-on LCPLLs are never disabled. */
static void hsw_ddi_lcpll_disable(struct intel_display *display,
				  struct intel_shared_dpll *pll)
{
}
1297 
/*
 * The always-on LCPLLs carry no programmable state to read back;
 * report them as unconditionally enabled.
 */
static bool hsw_ddi_lcpll_get_hw_state(struct intel_display *display,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *dpll_hw_state)
{
	return true;
}
1304 
/* Ops for the fixed-frequency, always-on LCPLLs (DP outputs). */
static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
	.enable = hsw_ddi_lcpll_enable,
	.disable = hsw_ddi_lcpll_disable,
	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
	.get_freq = hsw_ddi_lcpll_get_freq,
};
1311 
/* All PLLs available on HSW/BDW; the LCPLLs are always on. */
static const struct dpll_info hsw_plls[] = {
	{ .name = "WRPLL 1", .funcs = &hsw_ddi_wrpll_funcs, .id = DPLL_ID_WRPLL1, },
	{ .name = "WRPLL 2", .funcs = &hsw_ddi_wrpll_funcs, .id = DPLL_ID_WRPLL2, },
	{ .name = "SPLL", .funcs = &hsw_ddi_spll_funcs, .id = DPLL_ID_SPLL, },
	{ .name = "LCPLL 810", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_810,
	  .always_on = true, },
	{ .name = "LCPLL 1350", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_1350,
	  .always_on = true, },
	{ .name = "LCPLL 2700", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_2700,
	  .always_on = true, },
	{}
};
1324 
/* Top-level PLL manager for HSW/BDW. */
static const struct intel_dpll_mgr hsw_pll_mgr = {
	.dpll_info = hsw_plls,
	.compute_dplls = hsw_compute_dpll,
	.get_dplls = hsw_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = hsw_update_dpll_ref_clks,
	.dump_hw_state = hsw_dump_hw_state,
	.compare_hw_state = hsw_compare_hw_state,
};
1334 
/* Per-PLL register set on SKL: control plus the two HDMI config regs. */
struct skl_dpll_regs {
	i915_reg_t ctl, cfgcr1, cfgcr2;
};
1338 
/*
 * This array is indexed by the *shared* pll id. DPLL 0 has no cfgcr
 * registers since it doesn't support HDMI mode.
 */
static const struct skl_dpll_regs skl_dpll_regs[4] = {
	{
		/* DPLL 0 */
		.ctl = LCPLL1_CTL,
		/* DPLL 0 doesn't support HDMI mode */
	},
	{
		/* DPLL 1 */
		.ctl = LCPLL2_CTL,
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
	},
	{
		/* DPLL 2 */
		.ctl = WRPLL_CTL(0),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
	},
	{
		/* DPLL 3 */
		.ctl = WRPLL_CTL(1),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
	},
};
1365 
/*
 * Program this PLL's 6-bit field in the shared DPLL_CTRL1 register.
 * hw_state->ctrl1 is stored unshifted (as if for PLL 0) and shifted
 * into place here.
 */
static void skl_ddi_pll_write_ctrl1(struct intel_display *display,
				    struct intel_shared_dpll *pll,
				    const struct skl_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_rmw(display, DPLL_CTRL1,
		     DPLL_CTRL1_HDMI_MODE(id) |
		     DPLL_CTRL1_SSC(id) |
		     DPLL_CTRL1_LINK_RATE_MASK(id),
		     hw_state->ctrl1 << (id * 6));
	intel_de_posting_read(display, DPLL_CTRL1);
}
1379 
/*
 * Enable a SKL DPLL (1-3): program DPLL_CTRL1 and the CFGCR registers,
 * set the enable bit and wait for lock.
 */
static void skl_ddi_pll_enable(struct intel_display *display,
			       struct intel_shared_dpll *pll,
			       const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	skl_ddi_pll_write_ctrl1(display, pll, hw_state);

	intel_de_write(display, regs[id].cfgcr1, hw_state->cfgcr1);
	intel_de_write(display, regs[id].cfgcr2, hw_state->cfgcr2);
	intel_de_posting_read(display, regs[id].cfgcr1);
	intel_de_posting_read(display, regs[id].cfgcr2);

	/* the enable bit is always bit 31 */
	intel_de_rmw(display, regs[id].ctl, 0, LCPLL_PLL_ENABLE);

	/* 5 ms lock timeout */
	if (intel_de_wait_for_set(display, DPLL_STATUS, DPLL_LOCK(id), 5))
		drm_err(display->drm, "DPLL %d not locked\n", id);
}
1401 
/*
 * DPLL0 is already running (it drives CDCLK); only its DPLL_CTRL1
 * field needs (re)programming.
 */
static void skl_ddi_dpll0_enable(struct intel_display *display,
				 struct intel_shared_dpll *pll,
				 const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;

	skl_ddi_pll_write_ctrl1(display, pll, hw_state);
}
1410 
/* Disable a SKL DPLL (1-3) by clearing its enable bit. */
static void skl_ddi_pll_disable(struct intel_display *display,
				struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	/* the enable bit is always bit 31 */
	intel_de_rmw(display, regs[id].ctl, LCPLL_PLL_ENABLE, 0);
	intel_de_posting_read(display, regs[id].ctl);
}
1421 
/* No-op: DPLL0 drives CDCLK and must never be disabled here. */
static void skl_ddi_dpll0_disable(struct intel_display *display,
				  struct intel_shared_dpll *pll)
{
}
1426 
/*
 * Read back the current hw state of a SKL DPLL (1-3) into
 * @dpll_hw_state. Returns true if the PLL is enabled.
 */
static bool skl_ddi_pll_get_hw_state(struct intel_display *display,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *dpll_hw_state)
{
	struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret;
	u32 val;

	/* Registers are only accessible with the display core powered up. */
	wakeref = intel_display_power_get_if_enabled(display,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(display, regs[id].ctl);
	if (!(val & LCPLL_PLL_ENABLE))
		goto out;

	/* Extract this PLL's unshifted 6-bit DPLL_CTRL1 field. */
	val = intel_de_read(display, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	/* avoid reading back stale values if HDMI mode is not enabled */
	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
		hw_state->cfgcr1 = intel_de_read(display, regs[id].cfgcr1);
		hw_state->cfgcr2 = intel_de_read(display, regs[id].cfgcr2);
	}
	ret = true;

out:
	intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1464 
/*
 * Read back the hw state of DPLL0. Unlike the other DPLLs it must
 * always be enabled (it drives CDCLK), and it never runs in HDMI mode,
 * so only the DPLL_CTRL1 field is captured.
 */
static bool skl_ddi_dpll0_get_hw_state(struct intel_display *display,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *dpll_hw_state)
{
	struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(display,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	/* DPLL0 is always enabled since it drives CDCLK */
	val = intel_de_read(display, regs[id].ctl);
	if (drm_WARN_ON(display->drm, !(val & LCPLL_PLL_ENABLE)))
		goto out;

	/* Extract this PLL's unshifted 6-bit DPLL_CTRL1 field. */
	val = intel_de_read(display, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	ret = true;

out:
	intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1498 
/* Running best-candidate state for the SKL WRPLL divider search. */
struct skl_wrpll_context {
	u64 min_deviation;		/* current minimal deviation */
	u64 central_freq;		/* chosen central freq */
	u64 dco_freq;			/* chosen dco freq */
	unsigned int p;			/* chosen divider */
};
1505 
/*
 * DCO freq must be within +1%/-6% of the DCO central freq.
 * Deviations are expressed in units of 0.01% (see the 10000 scale
 * factor in skl_wrpll_try_divider()).
 */
#define SKL_DCO_MAX_PDEVIATION	100
#define SKL_DCO_MAX_NDEVIATION	600
1509 
1510 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1511 				  u64 central_freq,
1512 				  u64 dco_freq,
1513 				  unsigned int divider)
1514 {
1515 	u64 deviation;
1516 
1517 	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1518 			      central_freq);
1519 
1520 	/* positive deviation */
1521 	if (dco_freq >= central_freq) {
1522 		if (deviation < SKL_DCO_MAX_PDEVIATION &&
1523 		    deviation < ctx->min_deviation) {
1524 			ctx->min_deviation = deviation;
1525 			ctx->central_freq = central_freq;
1526 			ctx->dco_freq = dco_freq;
1527 			ctx->p = divider;
1528 		}
1529 	/* negative deviation */
1530 	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1531 		   deviation < ctx->min_deviation) {
1532 		ctx->min_deviation = deviation;
1533 		ctx->central_freq = central_freq;
1534 		ctx->dco_freq = dco_freq;
1535 		ctx->p = divider;
1536 	}
1537 }
1538 
/*
 * Split the overall divider @p into the three hardware multipliers
 * P0 (pdiv), P1 (qdiv) and P2 (kdiv) such that p = p0 * p1 * p2.
 * Outputs are left untouched when @p is not a supported divider.
 */
static void skl_wrpll_get_multipliers(unsigned int p,
				      unsigned int *p0 /* out */,
				      unsigned int *p1 /* out */,
				      unsigned int *p2 /* out */)
{
	if (p % 2 == 0) {
		unsigned int half = p / 2;

		/* Small even dividers map directly to P2. */
		switch (half) {
		case 1:
		case 2:
		case 3:
		case 5:
			*p0 = 2;
			*p1 = 1;
			*p2 = half;
			return;
		}

		/* Otherwise pull a factor of 2, 3 or 7 into P0. */
		if (half % 2 == 0) {
			*p0 = 2;
			*p1 = half / 2;
			*p2 = 2;
		} else if (half % 3 == 0) {
			*p0 = 3;
			*p1 = half / 3;
			*p2 = 2;
		} else if (half % 7 == 0) {
			*p0 = 7;
			*p1 = half / 7;
			*p2 = 2;
		}
		return;
	}

	/* Odd dividers: 3, 5, 7, 9, 15, 21, 35. */
	switch (p) {
	case 3:
	case 9:
		*p0 = 3;
		*p1 = 1;
		*p2 = p / 3;
		return;
	case 5:
	case 7:
		*p0 = p;
		*p1 = 1;
		*p2 = 1;
		return;
	case 15:
		*p0 = 3;
		*p1 = 1;
		*p2 = 5;
		return;
	case 21:
		*p0 = 7;
		*p1 = 1;
		*p2 = 3;
		return;
	case 35:
		*p0 = 7;
		*p1 = 1;
		*p2 = 5;
		return;
	}
}
1587 
/* Hardware field values programmed into DPLL_CFGCR1/CFGCR2. */
struct skl_wrpll_params {
	u32 dco_fraction;	/* 15-bit fractional DCO multiplier */
	u32 dco_integer;	/* integer DCO multiplier */
	u32 qdiv_ratio;		/* P1 */
	u32 qdiv_mode;		/* 1 if qdiv_ratio != 1 */
	u32 kdiv;		/* encoded P2 */
	u32 pdiv;		/* encoded P0 */
	u32 central_freq;	/* encoded DCO central frequency */
};
1597 
1598 static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
1599 				      u64 afe_clock,
1600 				      int ref_clock,
1601 				      u64 central_freq,
1602 				      u32 p0, u32 p1, u32 p2)
1603 {
1604 	u64 dco_freq;
1605 
1606 	switch (central_freq) {
1607 	case 9600000000ULL:
1608 		params->central_freq = 0;
1609 		break;
1610 	case 9000000000ULL:
1611 		params->central_freq = 1;
1612 		break;
1613 	case 8400000000ULL:
1614 		params->central_freq = 3;
1615 	}
1616 
1617 	switch (p0) {
1618 	case 1:
1619 		params->pdiv = 0;
1620 		break;
1621 	case 2:
1622 		params->pdiv = 1;
1623 		break;
1624 	case 3:
1625 		params->pdiv = 2;
1626 		break;
1627 	case 7:
1628 		params->pdiv = 4;
1629 		break;
1630 	default:
1631 		WARN(1, "Incorrect PDiv\n");
1632 	}
1633 
1634 	switch (p2) {
1635 	case 5:
1636 		params->kdiv = 0;
1637 		break;
1638 	case 2:
1639 		params->kdiv = 1;
1640 		break;
1641 	case 3:
1642 		params->kdiv = 2;
1643 		break;
1644 	case 1:
1645 		params->kdiv = 3;
1646 		break;
1647 	default:
1648 		WARN(1, "Incorrect KDiv\n");
1649 	}
1650 
1651 	params->qdiv_ratio = p1;
1652 	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
1653 
1654 	dco_freq = p0 * p1 * p2 * afe_clock;
1655 
1656 	/*
1657 	 * Intermediate values are in Hz.
1658 	 * Divide by MHz to match bsepc
1659 	 */
1660 	params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
1661 	params->dco_fraction =
1662 		div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
1663 			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
1664 }
1665 
/*
 * Find a DCO central frequency and overall divider P for @clock (kHz),
 * then split P into the P0/P1/P2 hardware multipliers and fill in
 * @wrpll_params. Even dividers are preferred over odd ones.
 *
 * Returns 0 on success, -EINVAL if no divider yields a DCO frequency
 * within the allowed deviation window.
 */
static int
skl_ddi_calculate_wrpll(int clock,
			int ref_clock,
			struct skl_wrpll_params *wrpll_params)
{
	static const u64 dco_central_freq[3] = { 8400000000ULL,
						 9000000000ULL,
						 9600000000ULL };
	static const u8 even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
					    24, 28, 30, 32, 36, 40, 42, 44,
					    48, 52, 54, 56, 60, 64, 66, 68,
					    70, 72, 76, 78, 80, 84, 88, 90,
					    92, 96, 98 };
	static const u8 odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
	static const struct {
		const u8 *list;
		int n_dividers;
	} dividers[] = {
		{ even_dividers, ARRAY_SIZE(even_dividers) },
		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
	};
	struct skl_wrpll_context ctx = {
		.min_deviation = U64_MAX,
	};
	unsigned int dco, d, i;
	unsigned int p0, p1, p2;
	u64 afe_clock = (u64)clock * 1000 * 5; /* AFE Clock is 5x Pixel clock, in Hz */

	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
			for (i = 0; i < dividers[d].n_dividers; i++) {
				unsigned int p = dividers[d].list[i];
				u64 dco_freq = p * afe_clock;

				skl_wrpll_try_divider(&ctx,
						      dco_central_freq[dco],
						      dco_freq,
						      p);
				/*
				 * Skip the remaining dividers if we're sure to
				 * have found the definitive divider, we can't
				 * improve a 0 deviation.
				 */
				if (ctx.min_deviation == 0)
					goto skip_remaining_dividers;
			}
		}

skip_remaining_dividers:
		/*
		 * If a solution is found with an even divider, prefer
		 * this one.
		 */
		if (d == 0 && ctx.p)
			break;
	}

	if (!ctx.p)
		return -EINVAL;

	/*
	 * gcc incorrectly analyses that these can be used without being
	 * initialized. To be fair, it's hard to guess.
	 */
	p0 = p1 = p2 = 0;
	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
	skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
				  ctx.central_freq, p0, p1, p2);

	return 0;
}
1737 
/*
 * Decode the saved CFGCR1/CFGCR2 values of a SKL WRPLL in HDMI mode and
 * return the resulting port clock (in kHz), or 0 on bogus divider
 * fields.
 */
static int skl_ddi_wrpll_get_freq(struct intel_display *display,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
	int ref_clock = display->dpll.ref_clks.nssc;
	u32 p0, p1, p2, dco_freq;

	/* Raw register fields; decoded into divider values below. */
	p0 = hw_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
	p2 = hw_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;

	/* The QDIV ratio only applies when QDIV mode is enabled. */
	if (hw_state->cfgcr2 &  DPLL_CFGCR2_QDIV_MODE(1))
		p1 = (hw_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
	else
		p1 = 1;


	switch (p0) {
	case DPLL_CFGCR2_PDIV_1:
		p0 = 1;
		break;
	case DPLL_CFGCR2_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR2_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR2_PDIV_7_INVALID:
		/*
		 * Incorrect ASUS-Z170M BIOS setting, the HW seems to ignore bit#0,
		 * handling it the same way as PDIV_7.
		 */
		drm_dbg_kms(display->drm, "Invalid WRPLL PDIV divider value, fixing it.\n");
		fallthrough;
	case DPLL_CFGCR2_PDIV_7:
		p0 = 7;
		break;
	default:
		MISSING_CASE(p0);
		return 0;
	}

	switch (p2) {
	case DPLL_CFGCR2_KDIV_5:
		p2 = 5;
		break;
	case DPLL_CFGCR2_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR2_KDIV_3:
		p2 = 3;
		break;
	case DPLL_CFGCR2_KDIV_1:
		p2 = 1;
		break;
	default:
		MISSING_CASE(p2);
		return 0;
	}

	/* DCO frequency = (integer + fraction/2^15) * ref clock. */
	dco_freq = (hw_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
		   ref_clock;

	dco_freq += ((hw_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
		    ref_clock / 0x8000;

	if (drm_WARN_ON(display->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	/* Undo the 5x AFE clock multiplier and the P0*P1*P2 divider. */
	return dco_freq / (p0 * p1 * p2 * 5);
}
1809 
/*
 * Compute the WRPLL (HDMI mode) state for the requested port clock and
 * store the encoded CTRL1/CFGCR1/CFGCR2 values in the CRTC state.
 * port_clock is rewritten with the frequency actually produced.
 *
 * Returns 0 on success, or the error from skl_ddi_calculate_wrpll().
 */
static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct skl_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.skl;
	struct skl_wrpll_params wrpll_params = {};
	int ret;

	ret = skl_ddi_calculate_wrpll(crtc_state->port_clock,
				      display->dpll.ref_clks.nssc, &wrpll_params);
	if (ret)
		return ret;

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	hw_state->ctrl1 =
		DPLL_CTRL1_OVERRIDE(0) |
		DPLL_CTRL1_HDMI_MODE(0);

	hw_state->cfgcr1 =
		DPLL_CFGCR1_FREQ_ENABLE |
		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
		wrpll_params.dco_integer;

	hw_state->cfgcr2 =
		DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
		wrpll_params.central_freq;

	/* pll is unused by skl_ddi_wrpll_get_freq(), hence NULL here. */
	crtc_state->port_clock = skl_ddi_wrpll_get_freq(display, NULL,
							&crtc_state->dpll_hw_state);

	return 0;
}
1847 
/*
 * Encode the DP link rate for this CRTC into the (unshifted) DPLL_CTRL1
 * field of the PLL state. Always returns 0.
 *
 * NOTE(review): there is no default case, so an unlisted port clock
 * leaves the link-rate field zeroed — assumed pre-validated by the
 * caller; confirm against the DP rate filtering elsewhere.
 */
static int
skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
	struct skl_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.skl;
	u32 ctrl1;

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
	switch (crtc_state->port_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
		break;
	case 135000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
		break;
	case 270000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
		break;
		/* eDP 1.4 rates */
	case 162000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
		break;
	case 108000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
		break;
	case 216000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
		break;
	}

	hw_state->ctrl1 = ctrl1;

	return 0;
}
1885 
1886 static int skl_ddi_lcpll_get_freq(struct intel_display *display,
1887 				  const struct intel_shared_dpll *pll,
1888 				  const struct intel_dpll_hw_state *dpll_hw_state)
1889 {
1890 	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1891 	int link_clock = 0;
1892 
1893 	switch ((hw_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
1894 		DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
1895 	case DPLL_CTRL1_LINK_RATE_810:
1896 		link_clock = 81000;
1897 		break;
1898 	case DPLL_CTRL1_LINK_RATE_1080:
1899 		link_clock = 108000;
1900 		break;
1901 	case DPLL_CTRL1_LINK_RATE_1350:
1902 		link_clock = 135000;
1903 		break;
1904 	case DPLL_CTRL1_LINK_RATE_1620:
1905 		link_clock = 162000;
1906 		break;
1907 	case DPLL_CTRL1_LINK_RATE_2160:
1908 		link_clock = 216000;
1909 		break;
1910 	case DPLL_CTRL1_LINK_RATE_2700:
1911 		link_clock = 270000;
1912 		break;
1913 	default:
1914 		drm_WARN(display->drm, 1, "Unsupported link rate\n");
1915 		break;
1916 	}
1917 
1918 	return link_clock * 2;
1919 }
1920 
1921 static int skl_compute_dpll(struct intel_atomic_state *state,
1922 			    struct intel_crtc *crtc,
1923 			    struct intel_encoder *encoder)
1924 {
1925 	struct intel_crtc_state *crtc_state =
1926 		intel_atomic_get_new_crtc_state(state, crtc);
1927 
1928 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1929 		return skl_ddi_hdmi_pll_dividers(crtc_state);
1930 	else if (intel_crtc_has_dp_encoder(crtc_state))
1931 		return skl_ddi_dp_set_dpll_hw_state(crtc_state);
1932 	else
1933 		return -EINVAL;
1934 }
1935 
1936 static int skl_get_dpll(struct intel_atomic_state *state,
1937 			struct intel_crtc *crtc,
1938 			struct intel_encoder *encoder)
1939 {
1940 	struct intel_crtc_state *crtc_state =
1941 		intel_atomic_get_new_crtc_state(state, crtc);
1942 	struct intel_shared_dpll *pll;
1943 
1944 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1945 		pll = intel_find_shared_dpll(state, crtc,
1946 					     &crtc_state->dpll_hw_state,
1947 					     BIT(DPLL_ID_SKL_DPLL0));
1948 	else
1949 		pll = intel_find_shared_dpll(state, crtc,
1950 					     &crtc_state->dpll_hw_state,
1951 					     BIT(DPLL_ID_SKL_DPLL3) |
1952 					     BIT(DPLL_ID_SKL_DPLL2) |
1953 					     BIT(DPLL_ID_SKL_DPLL1));
1954 	if (!pll)
1955 		return -EINVAL;
1956 
1957 	intel_reference_shared_dpll(state, crtc,
1958 				    pll, &crtc_state->dpll_hw_state);
1959 
1960 	crtc_state->shared_dpll = pll;
1961 
1962 	return 0;
1963 }
1964 
1965 static int skl_ddi_pll_get_freq(struct intel_display *display,
1966 				const struct intel_shared_dpll *pll,
1967 				const struct intel_dpll_hw_state *dpll_hw_state)
1968 {
1969 	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1970 
1971 	/*
1972 	 * ctrl1 register is already shifted for each pll, just use 0 to get
1973 	 * the internal shift for each field
1974 	 */
1975 	if (hw_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
1976 		return skl_ddi_wrpll_get_freq(display, pll, dpll_hw_state);
1977 	else
1978 		return skl_ddi_lcpll_get_freq(display, pll, dpll_hw_state);
1979 }
1980 
/* Cache the non-SSC DPLL reference clock: same as the CDCLK reference. */
static void skl_update_dpll_ref_clks(struct intel_display *display)
{
	/* No SSC ref */
	display->dpll.ref_clks.nssc = display->cdclk.hw.ref;
}
1986 
1987 static void skl_dump_hw_state(struct drm_printer *p,
1988 			      const struct intel_dpll_hw_state *dpll_hw_state)
1989 {
1990 	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1991 
1992 	drm_printf(p, "dpll_hw_state: ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
1993 		   hw_state->ctrl1, hw_state->cfgcr1, hw_state->cfgcr2);
1994 }
1995 
1996 static bool skl_compare_hw_state(const struct intel_dpll_hw_state *_a,
1997 				 const struct intel_dpll_hw_state *_b)
1998 {
1999 	const struct skl_dpll_hw_state *a = &_a->skl;
2000 	const struct skl_dpll_hw_state *b = &_b->skl;
2001 
2002 	return a->ctrl1 == b->ctrl1 &&
2003 		a->cfgcr1 == b->cfgcr1 &&
2004 		a->cfgcr2 == b->cfgcr2;
2005 }
2006 
/* Ops for SKL DPLL1-3. */
static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
	.enable = skl_ddi_pll_enable,
	.disable = skl_ddi_pll_disable,
	.get_hw_state = skl_ddi_pll_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};
2013 
/* Ops for the always-on DPLL0, which also drives CDCLK. */
static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
	.enable = skl_ddi_dpll0_enable,
	.disable = skl_ddi_dpll0_disable,
	.get_hw_state = skl_ddi_dpll0_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};
2020 
/* All DPLLs available on SKL; DPLL0 is always on (drives CDCLK). */
static const struct dpll_info skl_plls[] = {
	{ .name = "DPLL 0", .funcs = &skl_ddi_dpll0_funcs, .id = DPLL_ID_SKL_DPLL0,
	  .always_on = true, },
	{ .name = "DPLL 1", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL1, },
	{ .name = "DPLL 2", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL2, },
	{ .name = "DPLL 3", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL3, },
	{}
};
2029 
/* Top-level PLL manager for SKL. */
static const struct intel_dpll_mgr skl_pll_mgr = {
	.dpll_info = skl_plls,
	.compute_dplls = skl_compute_dpll,
	.get_dplls = skl_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = skl_update_dpll_ref_clks,
	.dump_hw_state = skl_dump_hw_state,
	.compare_hw_state = skl_compare_hw_state,
};
2039 
/*
 * Enable a BXT/GLK port PLL: program the dividers and loop filter from
 * the precomputed hw_state, enable the PLL and wait for lock, then set
 * up lane staggering. On GLK the PLL power well is powered up first.
 */
static void bxt_ddi_pll_enable(struct intel_display *display,
			       struct intel_shared_dpll *pll,
			       const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	enum dpio_phy phy = DPIO_PHY0;
	enum dpio_channel ch = DPIO_CH0;
	u32 temp;

	bxt_port_to_phy_channel(display, port, &phy, &ch);

	/* Non-SSC reference */
	intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_REF_SEL);

	if (display->platform.geminilake) {
		/* GLK: power up the PLL and wait (max 200 us) for the power state */
		intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port),
			     0, PORT_PLL_POWER_ENABLE);

		if (wait_for_us((intel_de_read(display, BXT_PORT_PLL_ENABLE(port)) &
				 PORT_PLL_POWER_STATE), 200))
			drm_err(display->drm,
				"Power state not set for PLL:%d\n", port);
	}

	/* Disable 10 bit clock */
	intel_de_rmw(display, BXT_PORT_PLL_EBB_4(phy, ch),
		     PORT_PLL_10BIT_CLK_ENABLE, 0);

	/* Write P1 & P2 */
	intel_de_rmw(display, BXT_PORT_PLL_EBB_0(phy, ch),
		     PORT_PLL_P1_MASK | PORT_PLL_P2_MASK, hw_state->ebb0);

	/* Write M2 integer */
	intel_de_rmw(display, BXT_PORT_PLL(phy, ch, 0),
		     PORT_PLL_M2_INT_MASK, hw_state->pll0);

	/* Write N */
	intel_de_rmw(display, BXT_PORT_PLL(phy, ch, 1),
		     PORT_PLL_N_MASK, hw_state->pll1);

	/* Write M2 fraction */
	intel_de_rmw(display, BXT_PORT_PLL(phy, ch, 2),
		     PORT_PLL_M2_FRAC_MASK, hw_state->pll2);

	/* Write M2 fraction enable */
	intel_de_rmw(display, BXT_PORT_PLL(phy, ch, 3),
		     PORT_PLL_M2_FRAC_ENABLE, hw_state->pll3);

	/* Write coeff */
	temp = intel_de_read(display, BXT_PORT_PLL(phy, ch, 6));
	temp &= ~PORT_PLL_PROP_COEFF_MASK;
	temp &= ~PORT_PLL_INT_COEFF_MASK;
	temp &= ~PORT_PLL_GAIN_CTL_MASK;
	temp |= hw_state->pll6;
	intel_de_write(display, BXT_PORT_PLL(phy, ch, 6), temp);

	/* Write calibration val */
	intel_de_rmw(display, BXT_PORT_PLL(phy, ch, 8),
		     PORT_PLL_TARGET_CNT_MASK, hw_state->pll8);

	/* Lock threshold */
	intel_de_rmw(display, BXT_PORT_PLL(phy, ch, 9),
		     PORT_PLL_LOCK_THRESHOLD_MASK, hw_state->pll9);

	/* DCO amplitude / override */
	temp = intel_de_read(display, BXT_PORT_PLL(phy, ch, 10));
	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
	temp &= ~PORT_PLL_DCO_AMP_MASK;
	temp |= hw_state->pll10;
	intel_de_write(display, BXT_PORT_PLL(phy, ch, 10), temp);

	/* Recalibrate with new settings */
	temp = intel_de_read(display, BXT_PORT_PLL_EBB_4(phy, ch));
	temp |= PORT_PLL_RECALIBRATE;
	intel_de_write(display, BXT_PORT_PLL_EBB_4(phy, ch), temp);
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	temp |= hw_state->ebb4;
	intel_de_write(display, BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Enable PLL */
	intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_ENABLE);
	intel_de_posting_read(display, BXT_PORT_PLL_ENABLE(port));

	/* Wait up to 200 us for PLL lock */
	if (wait_for_us((intel_de_read(display, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
			200))
		drm_err(display->drm, "PLL %d not locked\n", port);

	if (display->platform.geminilake) {
		/* GLK: set DCC delay range 2 on all lanes (read lane 0, write group) */
		temp = intel_de_read(display, BXT_PORT_TX_DW5_LN(phy, ch, 0));
		temp |= DCC_DELAY_RANGE_2;
		intel_de_write(display, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
	}

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers and we pick lanes 0/1 for that.
	 */
	temp = intel_de_read(display, BXT_PORT_PCS_DW12_LN01(phy, ch));
	temp &= ~LANE_STAGGER_MASK;
	temp &= ~LANESTAGGER_STRAP_OVRD;
	temp |= hw_state->pcsdw12;
	intel_de_write(display, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
}
2142 
/*
 * Disable a BXT/GLK port PLL; on GLK also power down the PLL power well
 * and wait for the power state to clear.
 */
static void bxt_ddi_pll_disable(struct intel_display *display,
				struct intel_shared_dpll *pll)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */

	intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port), PORT_PLL_ENABLE, 0);
	intel_de_posting_read(display, BXT_PORT_PLL_ENABLE(port));

	if (display->platform.geminilake) {
		intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port),
			     PORT_PLL_POWER_ENABLE, 0);

		/* up to 200 us for the power state to clear */
		if (wait_for_us(!(intel_de_read(display, BXT_PORT_PLL_ENABLE(port)) &
				  PORT_PLL_POWER_STATE), 200))
			drm_err(display->drm,
				"Power state not reset for PLL:%d\n", port);
	}
}
2161 
/*
 * Read the current PLL configuration back from the hardware into
 * *dpll_hw_state. Returns false if the display core power domain cannot
 * be acquired or the PLL is disabled. Each field is masked down to the
 * bits this driver programs so the result can be compared against the
 * computed state.
 */
static bool bxt_ddi_pll_get_hw_state(struct intel_display *display,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *dpll_hw_state)
{
	struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	intel_wakeref_t wakeref;
	enum dpio_phy phy;
	enum dpio_channel ch;
	u32 val;
	bool ret;

	bxt_port_to_phy_channel(display, port, &phy, &ch);

	wakeref = intel_display_power_get_if_enabled(display,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(display, BXT_PORT_PLL_ENABLE(port));
	if (!(val & PORT_PLL_ENABLE))
		goto out;

	hw_state->ebb0 = intel_de_read(display, BXT_PORT_PLL_EBB_0(phy, ch));
	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;

	hw_state->ebb4 = intel_de_read(display, BXT_PORT_PLL_EBB_4(phy, ch));
	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;

	hw_state->pll0 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 0));
	hw_state->pll0 &= PORT_PLL_M2_INT_MASK;

	hw_state->pll1 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 1));
	hw_state->pll1 &= PORT_PLL_N_MASK;

	hw_state->pll2 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 2));
	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;

	hw_state->pll3 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 3));
	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;

	hw_state->pll6 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 6));
	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
			  PORT_PLL_INT_COEFF_MASK |
			  PORT_PLL_GAIN_CTL_MASK;

	hw_state->pll8 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 8));
	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;

	hw_state->pll9 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 9));
	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;

	hw_state->pll10 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 10));
	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
			   PORT_PLL_DCO_AMP_MASK;

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers. We configure all lanes the same way, so
	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
	 */
	hw_state->pcsdw12 = intel_de_read(display,
					  BXT_PORT_PCS_DW12_LN01(phy, ch));
	if (intel_de_read(display, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
		drm_dbg(display->drm,
			"lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
			hw_state->pcsdw12,
			intel_de_read(display,
				      BXT_PORT_PCS_DW12_LN23(phy, ch)));
	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;

	ret = true;

out:
	intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
2242 
/* pre-calculated values for DP linkrates; .dot is matched against port_clock (kHz) */
static const struct dpll bxt_dp_clk_val[] = {
	/* m2 is .22 binary fixed point */
	{ .dot = 162000, .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
	{ .dot = 270000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
	{ .dot = 540000, .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
	{ .dot = 216000, .p1 = 3, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
	{ .dot = 243000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6133333 /* 24.3 */ },
	{ .dot = 324000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
	{ .dot = 432000, .p1 = 3, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
};
2254 
2255 static int
2256 bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
2257 			  struct dpll *clk_div)
2258 {
2259 	struct intel_display *display = to_intel_display(crtc_state);
2260 
2261 	/* Calculate HDMI div */
2262 	/*
2263 	 * FIXME: tie the following calculation into
2264 	 * i9xx_crtc_compute_clock
2265 	 */
2266 	if (!bxt_find_best_dpll(crtc_state, clk_div))
2267 		return -EINVAL;
2268 
2269 	drm_WARN_ON(display->drm, clk_div->m1 != 2);
2270 
2271 	return 0;
2272 }
2273 
2274 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
2275 				    struct dpll *clk_div)
2276 {
2277 	struct intel_display *display = to_intel_display(crtc_state);
2278 	int i;
2279 
2280 	*clk_div = bxt_dp_clk_val[0];
2281 	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
2282 		if (crtc_state->port_clock == bxt_dp_clk_val[i].dot) {
2283 			*clk_div = bxt_dp_clk_val[i];
2284 			break;
2285 		}
2286 	}
2287 
2288 	chv_calc_dpll_params(display->dpll.ref_clks.nssc, clk_div);
2289 
2290 	drm_WARN_ON(display->drm, clk_div->vco == 0 ||
2291 		    clk_div->dot != crtc_state->port_clock);
2292 }
2293 
2294 static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
2295 				     const struct dpll *clk_div)
2296 {
2297 	struct intel_display *display = to_intel_display(crtc_state);
2298 	struct bxt_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.bxt;
2299 	int clock = crtc_state->port_clock;
2300 	int vco = clk_div->vco;
2301 	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
2302 	u32 lanestagger;
2303 
2304 	if (vco >= 6200000 && vco <= 6700000) {
2305 		prop_coef = 4;
2306 		int_coef = 9;
2307 		gain_ctl = 3;
2308 		targ_cnt = 8;
2309 	} else if ((vco > 5400000 && vco < 6200000) ||
2310 			(vco >= 4800000 && vco < 5400000)) {
2311 		prop_coef = 5;
2312 		int_coef = 11;
2313 		gain_ctl = 3;
2314 		targ_cnt = 9;
2315 	} else if (vco == 5400000) {
2316 		prop_coef = 3;
2317 		int_coef = 8;
2318 		gain_ctl = 1;
2319 		targ_cnt = 9;
2320 	} else {
2321 		drm_err(display->drm, "Invalid VCO\n");
2322 		return -EINVAL;
2323 	}
2324 
2325 	if (clock > 270000)
2326 		lanestagger = 0x18;
2327 	else if (clock > 135000)
2328 		lanestagger = 0x0d;
2329 	else if (clock > 67000)
2330 		lanestagger = 0x07;
2331 	else if (clock > 33000)
2332 		lanestagger = 0x04;
2333 	else
2334 		lanestagger = 0x02;
2335 
2336 	hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
2337 	hw_state->pll0 = PORT_PLL_M2_INT(clk_div->m2 >> 22);
2338 	hw_state->pll1 = PORT_PLL_N(clk_div->n);
2339 	hw_state->pll2 = PORT_PLL_M2_FRAC(clk_div->m2 & 0x3fffff);
2340 
2341 	if (clk_div->m2 & 0x3fffff)
2342 		hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
2343 
2344 	hw_state->pll6 = PORT_PLL_PROP_COEFF(prop_coef) |
2345 		PORT_PLL_INT_COEFF(int_coef) |
2346 		PORT_PLL_GAIN_CTL(gain_ctl);
2347 
2348 	hw_state->pll8 = PORT_PLL_TARGET_CNT(targ_cnt);
2349 
2350 	hw_state->pll9 = PORT_PLL_LOCK_THRESHOLD(5);
2351 
2352 	hw_state->pll10 = PORT_PLL_DCO_AMP(15) |
2353 		PORT_PLL_DCO_AMP_OVR_EN_H;
2354 
2355 	hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
2356 
2357 	hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
2358 
2359 	return 0;
2360 }
2361 
2362 static int bxt_ddi_pll_get_freq(struct intel_display *display,
2363 				const struct intel_shared_dpll *pll,
2364 				const struct intel_dpll_hw_state *dpll_hw_state)
2365 {
2366 	const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
2367 	struct dpll clock;
2368 
2369 	clock.m1 = 2;
2370 	clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, hw_state->pll0) << 22;
2371 	if (hw_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
2372 		clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK,
2373 					  hw_state->pll2);
2374 	clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, hw_state->pll1);
2375 	clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, hw_state->ebb0);
2376 	clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, hw_state->ebb0);
2377 
2378 	return chv_calc_dpll_params(display->dpll.ref_clks.nssc, &clock);
2379 }
2380 
2381 static int
2382 bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2383 {
2384 	struct dpll clk_div = {};
2385 
2386 	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
2387 
2388 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2389 }
2390 
/*
 * Compute and store the BXT PLL state for an HDMI dotclock, then read
 * the achievable frequency back into port_clock from the computed state.
 */
static int
bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct dpll clk_div = {};
	int ret;

	bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);

	ret = bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
	if (ret)
		return ret;

	crtc_state->port_clock = bxt_ddi_pll_get_freq(display, NULL,
						      &crtc_state->dpll_hw_state);

	return 0;
}
2409 
2410 static int bxt_compute_dpll(struct intel_atomic_state *state,
2411 			    struct intel_crtc *crtc,
2412 			    struct intel_encoder *encoder)
2413 {
2414 	struct intel_crtc_state *crtc_state =
2415 		intel_atomic_get_new_crtc_state(state, crtc);
2416 
2417 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
2418 		return bxt_ddi_hdmi_set_dpll_hw_state(crtc_state);
2419 	else if (intel_crtc_has_dp_encoder(crtc_state))
2420 		return bxt_ddi_dp_set_dpll_hw_state(crtc_state);
2421 	else
2422 		return -EINVAL;
2423 }
2424 
/*
 * BXT has a fixed 1:1 port->PLL mapping, so look up the PLL for the
 * encoder's port directly and take a reference on it.
 */
static int bxt_get_dpll(struct intel_atomic_state *state,
			struct intel_crtc *crtc,
			struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_shared_dpll *pll;
	enum intel_dpll_id id;

	/* 1:1 mapping between ports and PLLs */
	id = (enum intel_dpll_id) encoder->port;
	pll = intel_get_shared_dpll_by_id(display, id);

	drm_dbg_kms(display->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
		    crtc->base.base.id, crtc->base.name, pll->info->name);

	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return 0;
}
2449 
/* BXT port PLLs use a 100 MHz reference for both SSC and non-SSC paths. */
static void bxt_update_dpll_ref_clks(struct intel_display *display)
{
	display->dpll.ref_clks.ssc = 100000;
	display->dpll.ref_clks.nssc = 100000;
	/* DSI non-SSC ref 19.2MHz */
}
2456 
2457 static void bxt_dump_hw_state(struct drm_printer *p,
2458 			      const struct intel_dpll_hw_state *dpll_hw_state)
2459 {
2460 	const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
2461 
2462 	drm_printf(p, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
2463 		   "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
2464 		   "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
2465 		   hw_state->ebb0, hw_state->ebb4,
2466 		   hw_state->pll0, hw_state->pll1, hw_state->pll2, hw_state->pll3,
2467 		   hw_state->pll6, hw_state->pll8, hw_state->pll9, hw_state->pll10,
2468 		   hw_state->pcsdw12);
2469 }
2470 
2471 static bool bxt_compare_hw_state(const struct intel_dpll_hw_state *_a,
2472 				 const struct intel_dpll_hw_state *_b)
2473 {
2474 	const struct bxt_dpll_hw_state *a = &_a->bxt;
2475 	const struct bxt_dpll_hw_state *b = &_b->bxt;
2476 
2477 	return a->ebb0 == b->ebb0 &&
2478 		a->ebb4 == b->ebb4 &&
2479 		a->pll0 == b->pll0 &&
2480 		a->pll1 == b->pll1 &&
2481 		a->pll2 == b->pll2 &&
2482 		a->pll3 == b->pll3 &&
2483 		a->pll6 == b->pll6 &&
2484 		a->pll8 == b->pll8 &&
2485 		a->pll10 == b->pll10 &&
2486 		a->pcsdw12 == b->pcsdw12;
2487 }
2488 
/* Ops shared by all BXT/GLK port PLLs. */
static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
	.enable = bxt_ddi_pll_enable,
	.disable = bxt_ddi_pll_disable,
	.get_hw_state = bxt_ddi_pll_get_hw_state,
	.get_freq = bxt_ddi_pll_get_freq,
};
2495 
/* One PLL per port A/B/C; the IDs reuse the SKL DPLL enum values. */
static const struct dpll_info bxt_plls[] = {
	{ .name = "PORT PLL A", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL0, },
	{ .name = "PORT PLL B", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL1, },
	{ .name = "PORT PLL C", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL2, },
	{}
};
2502 
/* Shared-DPLL manager hooks for BXT/GLK. */
static const struct intel_dpll_mgr bxt_pll_mgr = {
	.dpll_info = bxt_plls,
	.compute_dplls = bxt_compute_dpll,
	.get_dplls = bxt_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = bxt_update_dpll_ref_clks,
	.dump_hw_state = bxt_dump_hw_state,
	.compare_hw_state = bxt_compare_hw_state,
};
2512 
/*
 * Decompose the overall post divider into the hardware P/Q/K dividers.
 * Outputs are only written for dividers that match one of the known
 * decompositions (the values produced by icl_calc_wrpll()'s table).
 */
static void icl_wrpll_get_multipliers(int bestdiv, int *pdiv,
				      int *qdiv, int *kdiv)
{
	if (bestdiv % 2) {
		/* odd dividers */
		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
			*pdiv = bestdiv;
			*qdiv = 1;
			*kdiv = 1;
		} else { /* 9, 15, 21 */
			*pdiv = bestdiv / 3;
			*qdiv = 1;
			*kdiv = 3;
		}
		return;
	}

	/* even dividers */
	if (bestdiv == 2) {
		*pdiv = 2;
		*qdiv = 1;
		*kdiv = 1;
	} else if (bestdiv % 4 == 0) {
		*pdiv = 2;
		*qdiv = bestdiv / 4;
		*kdiv = 2;
	} else if (bestdiv % 6 == 0) {
		*pdiv = 3;
		*qdiv = bestdiv / 6;
		*kdiv = 2;
	} else if (bestdiv % 5 == 0) {
		*pdiv = 5;
		*qdiv = bestdiv / 10;
		*kdiv = 2;
	} else if (bestdiv % 14 == 0) {
		*pdiv = 7;
		*qdiv = bestdiv / 14;
		*kdiv = 2;
	}
}
2551 
2552 static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
2553 				      u32 dco_freq, u32 ref_freq,
2554 				      int pdiv, int qdiv, int kdiv)
2555 {
2556 	u32 dco;
2557 
2558 	switch (kdiv) {
2559 	case 1:
2560 		params->kdiv = 1;
2561 		break;
2562 	case 2:
2563 		params->kdiv = 2;
2564 		break;
2565 	case 3:
2566 		params->kdiv = 4;
2567 		break;
2568 	default:
2569 		WARN(1, "Incorrect KDiv\n");
2570 	}
2571 
2572 	switch (pdiv) {
2573 	case 2:
2574 		params->pdiv = 1;
2575 		break;
2576 	case 3:
2577 		params->pdiv = 2;
2578 		break;
2579 	case 5:
2580 		params->pdiv = 4;
2581 		break;
2582 	case 7:
2583 		params->pdiv = 8;
2584 		break;
2585 	default:
2586 		WARN(1, "Incorrect PDiv\n");
2587 	}
2588 
2589 	WARN_ON(kdiv != 2 && qdiv != 1);
2590 
2591 	params->qdiv_ratio = qdiv;
2592 	params->qdiv_mode = (qdiv == 1) ? 0 : 1;
2593 
2594 	dco = div_u64((u64)dco_freq << 15, ref_freq);
2595 
2596 	params->dco_integer = dco >> 15;
2597 	params->dco_fraction = dco & 0x7fff;
2598 }
2599 
2600 /*
2601  * Display WA #22010492432: ehl, tgl, adl-s, adl-p
2602  * Program half of the nominal DCO divider fraction value.
2603  */
2604 static bool
2605 ehl_combo_pll_div_frac_wa_needed(struct intel_display *display)
2606 {
2607 	return ((display->platform.elkhartlake &&
2608 		 IS_DISPLAY_STEP(display, STEP_B0, STEP_FOREVER)) ||
2609 		DISPLAY_VER(display) >= 12) &&
2610 		display->dpll.ref_clks.nssc == 38400;
2611 }
2612 
/* Pre-computed combo PHY PLL settings for a given DP link rate (kHz). */
struct icl_combo_pll_params {
	int clock;
	struct skl_wrpll_params wrpll;
};
2617 
2618 /*
2619  * These values alrea already adjusted: they're the bits we write to the
2620  * registers, not the logical values.
2621  */
/* Combo PHY PLL values for a 24 MHz reference; bracketed comments give the logical multiplier. */
static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
	{ 540000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 270000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 162000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 324000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 216000,
	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
	{ 432000,
	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 648000,
	  { .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 810000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};
2648 
2649 
/* Also used for 38.4 MHz values. */
static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
	{ 540000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 270000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 162000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 324000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 216000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
	{ 432000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 648000,
	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 810000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};
2677 
/* Fixed TBT PLL parameters per reference clock (ICL generation). */
static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
	.dco_integer = 0x151, .dco_fraction = 0x4000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};

static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};

/* Fixed TBT PLL parameters per reference clock (TGL generation). */
static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x54, .dco_fraction = 0x3000,
	/* the following params are unused */
	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
};

static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
	.dco_integer = 0x43, .dco_fraction = 0x4000,
	/* the following params are unused */
};
2698 
2699 static int icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2700 				 struct skl_wrpll_params *pll_params)
2701 {
2702 	struct intel_display *display = to_intel_display(crtc_state);
2703 	const struct icl_combo_pll_params *params =
2704 		display->dpll.ref_clks.nssc == 24000 ?
2705 		icl_dp_combo_pll_24MHz_values :
2706 		icl_dp_combo_pll_19_2MHz_values;
2707 	int clock = crtc_state->port_clock;
2708 	int i;
2709 
2710 	for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
2711 		if (clock == params[i].clock) {
2712 			*pll_params = params[i].wrpll;
2713 			return 0;
2714 		}
2715 	}
2716 
2717 	MISSING_CASE(clock);
2718 	return -EINVAL;
2719 }
2720 
2721 static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
2722 			    struct skl_wrpll_params *pll_params)
2723 {
2724 	struct intel_display *display = to_intel_display(crtc_state);
2725 
2726 	if (DISPLAY_VER(display) >= 12) {
2727 		switch (display->dpll.ref_clks.nssc) {
2728 		default:
2729 			MISSING_CASE(display->dpll.ref_clks.nssc);
2730 			fallthrough;
2731 		case 19200:
2732 		case 38400:
2733 			*pll_params = tgl_tbt_pll_19_2MHz_values;
2734 			break;
2735 		case 24000:
2736 			*pll_params = tgl_tbt_pll_24MHz_values;
2737 			break;
2738 		}
2739 	} else {
2740 		switch (display->dpll.ref_clks.nssc) {
2741 		default:
2742 			MISSING_CASE(display->dpll.ref_clks.nssc);
2743 			fallthrough;
2744 		case 19200:
2745 		case 38400:
2746 			*pll_params = icl_tbt_pll_19_2MHz_values;
2747 			break;
2748 		case 24000:
2749 			*pll_params = icl_tbt_pll_24MHz_values;
2750 			break;
2751 		}
2752 	}
2753 
2754 	return 0;
2755 }
2756 
/*
 * Frequency readout is not meaningful for the TBT PLL (see comment
 * below), so this always warns and returns 0.
 */
static int icl_ddi_tbt_pll_get_freq(struct intel_display *display,
				    const struct intel_shared_dpll *pll,
				    const struct intel_dpll_hw_state *dpll_hw_state)
{
	/*
	 * The PLL outputs multiple frequencies at the same time, selection is
	 * made at DDI clock mux level.
	 */
	drm_WARN_ON(display->drm, 1);

	return 0;
}
2769 
2770 static int icl_wrpll_ref_clock(struct intel_display *display)
2771 {
2772 	int ref_clock = display->dpll.ref_clks.nssc;
2773 
2774 	/*
2775 	 * For ICL+, the spec states: if reference frequency is 38.4,
2776 	 * use 19.2 because the DPLL automatically divides that by 2.
2777 	 */
2778 	if (ref_clock == 38400)
2779 		ref_clock = 19200;
2780 
2781 	return ref_clock;
2782 }
2783 
2784 static int
2785 icl_calc_wrpll(struct intel_crtc_state *crtc_state,
2786 	       struct skl_wrpll_params *wrpll_params)
2787 {
2788 	struct intel_display *display = to_intel_display(crtc_state);
2789 	int ref_clock = icl_wrpll_ref_clock(display);
2790 	u32 afe_clock = crtc_state->port_clock * 5;
2791 	u32 dco_min = 7998000;
2792 	u32 dco_max = 10000000;
2793 	u32 dco_mid = (dco_min + dco_max) / 2;
2794 	static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
2795 					 18, 20, 24, 28, 30, 32,  36,  40,
2796 					 42, 44, 48, 50, 52, 54,  56,  60,
2797 					 64, 66, 68, 70, 72, 76,  78,  80,
2798 					 84, 88, 90, 92, 96, 98, 100, 102,
2799 					  3,  5,  7,  9, 15, 21 };
2800 	u32 dco, best_dco = 0, dco_centrality = 0;
2801 	u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2802 	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2803 
2804 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2805 		dco = afe_clock * dividers[d];
2806 
2807 		if (dco <= dco_max && dco >= dco_min) {
2808 			dco_centrality = abs(dco - dco_mid);
2809 
2810 			if (dco_centrality < best_dco_centrality) {
2811 				best_dco_centrality = dco_centrality;
2812 				best_div = dividers[d];
2813 				best_dco = dco;
2814 			}
2815 		}
2816 	}
2817 
2818 	if (best_div == 0)
2819 		return -EINVAL;
2820 
2821 	icl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2822 	icl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
2823 				  pdiv, qdiv, kdiv);
2824 
2825 	return 0;
2826 }
2827 
/*
 * Compute the combo PHY PLL port clock (kHz) from the CFGCR register
 * state: decode the P/Q/K dividers and the DCO integer/fraction, undo
 * the EHL/TGL divider-fraction workaround if applicable, and divide
 * the DCO frequency by (p0 * p1 * p2 * 5).
 */
static int icl_ddi_combo_pll_get_freq(struct intel_display *display,
				      const struct intel_shared_dpll *pll,
				      const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
	int ref_clock = icl_wrpll_ref_clock(display);
	u32 dco_fraction;
	u32 p0, p1, p2, dco_freq;

	p0 = hw_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
	p2 = hw_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;

	/* QDiv only divides when qdiv_mode is enabled */
	if (hw_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
		p1 = (hw_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
			DPLL_CFGCR1_QDIV_RATIO_SHIFT;
	else
		p1 = 1;

	/* register encoding -> logical PDiv */
	switch (p0) {
	case DPLL_CFGCR1_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR1_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR1_PDIV_5:
		p0 = 5;
		break;
	case DPLL_CFGCR1_PDIV_7:
		p0 = 7;
		break;
	}

	/* register encoding -> logical KDiv */
	switch (p2) {
	case DPLL_CFGCR1_KDIV_1:
		p2 = 1;
		break;
	case DPLL_CFGCR1_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR1_KDIV_3:
		p2 = 3;
		break;
	}

	dco_freq = (hw_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
		   ref_clock;

	dco_fraction = (hw_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
		       DPLL_CFGCR0_DCO_FRACTION_SHIFT;

	/* the WA programs half the nominal fraction; undo it for readout */
	if (ehl_combo_pll_div_frac_wa_needed(display))
		dco_fraction *= 2;

	/* fraction is in 1/2^15 units of the reference clock */
	dco_freq += (dco_fraction * ref_clock) / 0x8000;

	if (drm_WARN_ON(display->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	return dco_freq / (p0 * p1 * p2 * 5);
}
2889 
/*
 * Translate skl_wrpll_params into the ICL/TGL CFGCR0/CFGCR1 register
 * values (and the DIV0 AFC startup override when requested by VBT).
 */
static void icl_calc_dpll_state(struct intel_display *display,
				const struct skl_wrpll_params *pll_params,
				struct intel_dpll_hw_state *dpll_hw_state)
{
	struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
	u32 dco_fraction = pll_params->dco_fraction;

	/* Display WA #22010492432: program half the nominal fraction */
	if (ehl_combo_pll_div_frac_wa_needed(display))
		dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);

	hw_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
			    pll_params->dco_integer;

	hw_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
			    DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
			    DPLL_CFGCR1_KDIV(pll_params->kdiv) |
			    DPLL_CFGCR1_PDIV(pll_params->pdiv);

	if (DISPLAY_VER(display) >= 12)
		hw_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
	else
		hw_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;

	if (display->vbt.override_afc_startup)
		hw_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(display->vbt.override_afc_startup_val);
}
2916 
/*
 * Find MG/DKL PHY PLL dividers (div1 aka hsdiv, div2 aka dsdiv) that
 * put the DCO (= div1 * div2 * symbol clock * 5) within the allowed
 * range, and fill in the corresponding refclk/clktop register values.
 * Returns 0 on success, -EINVAL if no divider combination fits.
 */
static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
				    u32 *target_dco_khz,
				    struct icl_dpll_hw_state *hw_state,
				    bool is_dkl)
{
	static const u8 div1_vals[] = { 7, 5, 3, 2 };
	u32 dco_min_freq, dco_max_freq;
	unsigned int i;
	int div2;

	/* DP pins the DCO at 8.1 GHz; HDMI has a range depending on SSC */
	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
	dco_max_freq = is_dp ? 8100000 : 10000000;

	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
		int div1 = div1_vals[i];

		for (div2 = 10; div2 > 0; div2--) {
			int dco = div1 * div2 * clock_khz * 5;
			int a_divratio, tlinedrv, inputsel;
			u32 hsdiv;

			if (dco < dco_min_freq || dco > dco_max_freq)
				continue;

			if (div2 >= 2) {
				/*
				 * Note: a_divratio not matching TGL BSpec
				 * algorithm but matching hardcoded values and
				 * working on HW for DP alt-mode at least
				 */
				a_divratio = is_dp ? 10 : 5;
				tlinedrv = is_dkl ? 1 : 2;
			} else {
				a_divratio = 5;
				tlinedrv = 0;
			}
			inputsel = is_dp ? 0 : 1;

			/* logical div1 -> hsdiv register encoding */
			switch (div1) {
			default:
				MISSING_CASE(div1);
				fallthrough;
			case 2:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
				break;
			case 3:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
				break;
			case 5:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
				break;
			case 7:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
				break;
			}

			*target_dco_khz = dco;

			hw_state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);

			hw_state->mg_clktop2_coreclkctl1 =
				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);

			hw_state->mg_clktop2_hsclkctl =
				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
				hsdiv |
				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);

			return 0;
		}
	}

	return -EINVAL;
}
2992 
2993 /*
2994  * The specification for this function uses real numbers, so the math had to be
2995  * adapted to integer-only calculation, that's why it looks so different.
2996  */
static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
				 struct intel_dpll_hw_state *dpll_hw_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
	int refclk_khz = display->dpll.ref_clks.nssc;
	int clock = crtc_state->port_clock;
	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
	u32 iref_ndiv, iref_trim, iref_pulse_w;
	u32 prop_coeff, int_coeff;
	u32 tdc_targetcnt, feedfwgain;
	u64 ssc_stepsize, ssc_steplen, ssc_steplog;
	u64 tmp;
	bool use_ssc = false;
	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
	bool is_dkl = DISPLAY_VER(display) >= 12;
	int ret;

	/* Pick the HSDIV/DSDIV combination and the resulting DCO frequency. */
	ret = icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
				       hw_state, is_dkl);
	if (ret)
		return ret;

	/*
	 * Split the DCO frequency into the feedback dividers:
	 * dco_khz = refclk_khz * m1div * (m2div_int + m2div_frac / 2^22).
	 * Only the MG PHY (!is_dkl) may fall back to m1div = 4 when the
	 * integer part does not fit in 8 bits.
	 */
	m1div = 2;
	m2div_int = dco_khz / (refclk_khz * m1div);
	if (m2div_int > 255) {
		if (!is_dkl) {
			m1div = 4;
			m2div_int = dco_khz / (refclk_khz * m1div);
		}

		if (m2div_int > 255)
			return -EINVAL;
	}
	m2div_rem = dco_khz % (refclk_khz * m1div);

	/* Remainder scaled to a 22-bit fixed point fraction. */
	tmp = (u64)m2div_rem * (1 << 22);
	do_div(tmp, refclk_khz * m1div);
	m2div_frac = tmp;

	/* Current reference generator parameters per reference frequency. */
	switch (refclk_khz) {
	case 19200:
		iref_ndiv = 1;
		iref_trim = 28;
		iref_pulse_w = 1;
		break;
	case 24000:
		iref_ndiv = 1;
		iref_trim = 25;
		iref_pulse_w = 2;
		break;
	case 38400:
		iref_ndiv = 2;
		iref_trim = 28;
		iref_pulse_w = 1;
		break;
	default:
		MISSING_CASE(refclk_khz);
		return -EINVAL;
	}

	/*
	 * tdc_res = 0.000003
	 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
	 *
	 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
	 * was supposed to be a division, but we rearranged the operations of
	 * the formula to avoid early divisions so we don't multiply the
	 * rounding errors.
	 *
	 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
	 * we also rearrange to work with integers.
	 *
	 * The 0.5 transformed to 5 results in a multiplication by 10 and the
	 * last division by 10.
	 */
	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;

	/*
	 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
	 * 32 bits. That's not a problem since we round the division down
	 * anyway.
	 */
	feedfwgain = (use_ssc || m2div_rem > 0) ?
		m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;

	/* Loop filter coefficients depend on the DCO frequency range. */
	if (dco_khz >= 9000000) {
		prop_coeff = 5;
		int_coeff = 10;
	} else {
		prop_coeff = 4;
		int_coeff = 8;
	}

	/* use_ssc is hardcoded false above, so this currently zeroes both. */
	if (use_ssc) {
		tmp = mul_u32_u32(dco_khz, 47 * 32);
		do_div(tmp, refclk_khz * m1div * 10000);
		ssc_stepsize = tmp;

		tmp = mul_u32_u32(dco_khz, 1000);
		ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
	} else {
		ssc_stepsize = 0;
		ssc_steplen = 0;
	}
	ssc_steplog = 4;

	/* write pll_state calculations */
	if (is_dkl) {
		hw_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
					 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
					 DKL_PLL_DIV0_FBPREDIV(m1div) |
					 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
		if (display->vbt.override_afc_startup) {
			u8 val = display->vbt.override_afc_startup_val;

			hw_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val);
		}

		hw_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
					 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);

		hw_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
					DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
					DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
					(use_ssc ? DKL_PLL_SSC_EN : 0);

		hw_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
					  DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);

		hw_state->mg_pll_tdc_coldst_bias =
				DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
				DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);

	} else {
		hw_state->mg_pll_div0 =
			(m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
			MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
			MG_PLL_DIV0_FBDIV_INT(m2div_int);

		hw_state->mg_pll_div1 =
			MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
			MG_PLL_DIV1_DITHER_DIV_2 |
			MG_PLL_DIV1_NDIVRATIO(1) |
			MG_PLL_DIV1_FBPREDIV(m1div);

		hw_state->mg_pll_lf =
			MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
			MG_PLL_LF_AFCCNTSEL_512 |
			MG_PLL_LF_GAINCTRL(1) |
			MG_PLL_LF_INT_COEFF(int_coeff) |
			MG_PLL_LF_PROP_COEFF(prop_coeff);

		hw_state->mg_pll_frac_lock =
			MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
			MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
			MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
			MG_PLL_FRAC_LOCK_DCODITHEREN |
			MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
		if (use_ssc || m2div_rem > 0)
			hw_state->mg_pll_frac_lock |=
				MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;

		hw_state->mg_pll_ssc =
			(use_ssc ? MG_PLL_SSC_EN : 0) |
			MG_PLL_SSC_TYPE(2) |
			MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
			MG_PLL_SSC_STEPNUM(ssc_steplog) |
			MG_PLL_SSC_FLLEN |
			MG_PLL_SSC_STEPSIZE(ssc_stepsize);

		hw_state->mg_pll_tdc_coldst_bias =
			MG_PLL_TDC_COLDST_COLDSTART |
			MG_PLL_TDC_COLDST_IREFINT_EN |
			MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
			MG_PLL_TDC_TDCOVCCORR_EN |
			MG_PLL_TDC_TDCSEL(3);

		hw_state->mg_pll_bias =
			MG_PLL_BIAS_BIAS_GB_SEL(3) |
			MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
			MG_PLL_BIAS_BIAS_BONUS(10) |
			MG_PLL_BIAS_BIASCAL_EN |
			MG_PLL_BIAS_CTRIM(12) |
			MG_PLL_BIAS_VREF_RDAC(4) |
			MG_PLL_BIAS_IREFTRIM(iref_trim);

		/*
		 * With a 38.4 MHz refclk only the coldstart bit is programmed
		 * in these two registers; the masks are also applied on
		 * readout so the state check compares matching subsets (see
		 * mg_pll_get_hw_state() and icl_mg_pll_write()).
		 */
		if (refclk_khz == 38400) {
			hw_state->mg_pll_tdc_coldst_bias_mask =
				MG_PLL_TDC_COLDST_COLDSTART;
			hw_state->mg_pll_bias_mask = 0;
		} else {
			hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
			hw_state->mg_pll_bias_mask = -1U;
		}

		hw_state->mg_pll_tdc_coldst_bias &=
			hw_state->mg_pll_tdc_coldst_bias_mask;
		hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
	}

	return 0;
}
3200 
/*
 * Reconstruct the MG/DKL PHY PLL output frequency (in kHz) from the
 * calculated/read-out register state.
 */
static int icl_ddi_mg_pll_get_freq(struct intel_display *display,
				   const struct intel_shared_dpll *pll,
				   const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
	u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
	u64 tmp;

	ref_clock = display->dpll.ref_clks.nssc;

	/*
	 * Extract the feedback dividers; the register layout differs between
	 * the DKL (display ver >= 12) and MG PHYs. The fractional part is
	 * only valid when the corresponding enable bit is set.
	 */
	if (DISPLAY_VER(display) >= 12) {
		m1 = hw_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
		m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
		m2_int = hw_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;

		if (hw_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
			m2_frac = hw_state->mg_pll_bias &
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK;
			m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
		} else {
			m2_frac = 0;
		}
	} else {
		m1 = hw_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
		m2_int = hw_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;

		if (hw_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
			m2_frac = hw_state->mg_pll_div0 &
				  MG_PLL_DIV0_FBDIV_FRAC_MASK;
			m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
		} else {
			m2_frac = 0;
		}
	}

	/* Decode the HSDIV ratio field back to its numeric divider value. */
	switch (hw_state->mg_clktop2_hsclkctl &
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
		div1 = 2;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
		div1 = 3;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
		div1 = 5;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
		div1 = 7;
		break;
	default:
		MISSING_CASE(hw_state->mg_clktop2_hsclkctl);
		return 0;
	}

	div2 = (hw_state->mg_clktop2_hsclkctl &
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;

	/* div2 value of 0 is same as 1 means no div */
	if (div2 == 0)
		div2 = 1;

	/*
	 * Adjust the original formula to delay the division by 2^22 in order to
	 * minimize possible rounding errors.
	 */
	tmp = (u64)m1 * m2_int * ref_clock +
	      (((u64)m1 * m2_frac * ref_clock) >> 22);
	tmp = div_u64(tmp, 5 * div1 * div2);

	return tmp;
}
3273 
3274 /**
3275  * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
3276  * @crtc_state: state for the CRTC to select the DPLL for
3277  * @port_dpll_id: the active @port_dpll_id to select
3278  *
3279  * Select the given @port_dpll_id instance from the DPLLs reserved for the
3280  * CRTC.
3281  */
3282 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
3283 			      enum icl_port_dpll_id port_dpll_id)
3284 {
3285 	struct icl_port_dpll *port_dpll =
3286 		&crtc_state->icl_port_dplls[port_dpll_id];
3287 
3288 	crtc_state->shared_dpll = port_dpll->pll;
3289 	crtc_state->dpll_hw_state = port_dpll->hw_state;
3290 }
3291 
3292 static void icl_update_active_dpll(struct intel_atomic_state *state,
3293 				   struct intel_crtc *crtc,
3294 				   struct intel_encoder *encoder)
3295 {
3296 	struct intel_crtc_state *crtc_state =
3297 		intel_atomic_get_new_crtc_state(state, crtc);
3298 	struct intel_digital_port *primary_port;
3299 	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3300 
3301 	primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3302 		enc_to_mst(encoder)->primary :
3303 		enc_to_dig_port(encoder);
3304 
3305 	if (primary_port &&
3306 	    (intel_tc_port_in_dp_alt_mode(primary_port) ||
3307 	     intel_tc_port_in_legacy_mode(primary_port)))
3308 		port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3309 
3310 	icl_set_active_port_dpll(crtc_state, port_dpll_id);
3311 }
3312 
3313 static int icl_compute_combo_phy_dpll(struct intel_atomic_state *state,
3314 				      struct intel_crtc *crtc)
3315 {
3316 	struct intel_display *display = to_intel_display(state);
3317 	struct intel_crtc_state *crtc_state =
3318 		intel_atomic_get_new_crtc_state(state, crtc);
3319 	struct icl_port_dpll *port_dpll =
3320 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3321 	struct skl_wrpll_params pll_params = {};
3322 	int ret;
3323 
3324 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
3325 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
3326 		ret = icl_calc_wrpll(crtc_state, &pll_params);
3327 	else
3328 		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
3329 
3330 	if (ret)
3331 		return ret;
3332 
3333 	icl_calc_dpll_state(display, &pll_params, &port_dpll->hw_state);
3334 
3335 	/* this is mainly for the fastset check */
3336 	icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
3337 
3338 	crtc_state->port_clock = icl_ddi_combo_pll_get_freq(display, NULL,
3339 							    &port_dpll->hw_state);
3340 
3341 	return 0;
3342 }
3343 
/*
 * Reserve a combo PHY DPLL for the CRTC: pick the platform-specific set of
 * candidate DPLLs, find a free/compatible one and take a reference on it.
 */
static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
				  struct intel_crtc *crtc,
				  struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(crtc);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct icl_port_dpll *port_dpll =
		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	enum port port = encoder->port;
	unsigned long dpll_mask;

	if (display->platform.alderlake_s) {
		/* ADL-S: all four combo DPLLs are usable by any port. */
		dpll_mask =
			BIT(DPLL_ID_DG1_DPLL3) |
			BIT(DPLL_ID_DG1_DPLL2) |
			BIT(DPLL_ID_ICL_DPLL1) |
			BIT(DPLL_ID_ICL_DPLL0);
	} else if (display->platform.dg1) {
		/* DG1: DPLL pairs are tied to port groups. */
		if (port == PORT_D || port == PORT_E) {
			dpll_mask =
				BIT(DPLL_ID_DG1_DPLL2) |
				BIT(DPLL_ID_DG1_DPLL3);
		} else {
			dpll_mask =
				BIT(DPLL_ID_DG1_DPLL0) |
				BIT(DPLL_ID_DG1_DPLL1);
		}
	} else if (display->platform.rocketlake) {
		dpll_mask =
			BIT(DPLL_ID_EHL_DPLL4) |
			BIT(DPLL_ID_ICL_DPLL1) |
			BIT(DPLL_ID_ICL_DPLL0);
	} else if ((display->platform.jasperlake ||
		    display->platform.elkhartlake) &&
		   port != PORT_A) {
		/* JSL/EHL: DPLL4 is available to all ports except port A. */
		dpll_mask =
			BIT(DPLL_ID_EHL_DPLL4) |
			BIT(DPLL_ID_ICL_DPLL1) |
			BIT(DPLL_ID_ICL_DPLL0);
	} else {
		dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
	}

	/* Eliminate DPLLs from consideration if reserved by HTI */
	dpll_mask &= ~intel_hti_dpll_mask(display);

	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						dpll_mask);
	if (!port_dpll->pll)
		return -EINVAL;

	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);

	icl_update_active_dpll(state, crtc, encoder);

	return 0;
}
3404 
3405 static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
3406 				    struct intel_crtc *crtc)
3407 {
3408 	struct intel_display *display = to_intel_display(state);
3409 	struct intel_crtc_state *crtc_state =
3410 		intel_atomic_get_new_crtc_state(state, crtc);
3411 	const struct intel_crtc_state *old_crtc_state =
3412 		intel_atomic_get_old_crtc_state(state, crtc);
3413 	struct icl_port_dpll *port_dpll =
3414 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3415 	struct skl_wrpll_params pll_params = {};
3416 	int ret;
3417 
3418 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3419 	ret = icl_calc_tbt_pll(crtc_state, &pll_params);
3420 	if (ret)
3421 		return ret;
3422 
3423 	icl_calc_dpll_state(display, &pll_params, &port_dpll->hw_state);
3424 
3425 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3426 	ret = icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state);
3427 	if (ret)
3428 		return ret;
3429 
3430 	/* this is mainly for the fastset check */
3431 	if (old_crtc_state->shared_dpll &&
3432 	    old_crtc_state->shared_dpll->info->id == DPLL_ID_ICL_TBTPLL)
3433 		icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
3434 	else
3435 		icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);
3436 
3437 	crtc_state->port_clock = icl_ddi_mg_pll_get_freq(display, NULL,
3438 							 &port_dpll->hw_state);
3439 
3440 	return 0;
3441 }
3442 
3443 static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
3444 				struct intel_crtc *crtc,
3445 				struct intel_encoder *encoder)
3446 {
3447 	struct intel_crtc_state *crtc_state =
3448 		intel_atomic_get_new_crtc_state(state, crtc);
3449 	struct icl_port_dpll *port_dpll =
3450 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3451 	enum intel_dpll_id dpll_id;
3452 	int ret;
3453 
3454 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3455 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3456 						&port_dpll->hw_state,
3457 						BIT(DPLL_ID_ICL_TBTPLL));
3458 	if (!port_dpll->pll)
3459 		return -EINVAL;
3460 	intel_reference_shared_dpll(state, crtc,
3461 				    port_dpll->pll, &port_dpll->hw_state);
3462 
3463 
3464 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3465 	dpll_id = icl_tc_port_to_pll_id(intel_encoder_to_tc(encoder));
3466 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3467 						&port_dpll->hw_state,
3468 						BIT(dpll_id));
3469 	if (!port_dpll->pll) {
3470 		ret = -EINVAL;
3471 		goto err_unreference_tbt_pll;
3472 	}
3473 	intel_reference_shared_dpll(state, crtc,
3474 				    port_dpll->pll, &port_dpll->hw_state);
3475 
3476 	icl_update_active_dpll(state, crtc, encoder);
3477 
3478 	return 0;
3479 
3480 err_unreference_tbt_pll:
3481 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3482 	intel_unreference_shared_dpll(state, crtc, port_dpll->pll);
3483 
3484 	return ret;
3485 }
3486 
3487 static int icl_compute_dplls(struct intel_atomic_state *state,
3488 			     struct intel_crtc *crtc,
3489 			     struct intel_encoder *encoder)
3490 {
3491 	if (intel_encoder_is_combo(encoder))
3492 		return icl_compute_combo_phy_dpll(state, crtc);
3493 	else if (intel_encoder_is_tc(encoder))
3494 		return icl_compute_tc_phy_dplls(state, crtc);
3495 
3496 	MISSING_CASE(encoder->port);
3497 
3498 	return 0;
3499 }
3500 
3501 static int icl_get_dplls(struct intel_atomic_state *state,
3502 			 struct intel_crtc *crtc,
3503 			 struct intel_encoder *encoder)
3504 {
3505 	if (intel_encoder_is_combo(encoder))
3506 		return icl_get_combo_phy_dpll(state, crtc, encoder);
3507 	else if (intel_encoder_is_tc(encoder))
3508 		return icl_get_tc_phy_dplls(state, crtc, encoder);
3509 
3510 	MISSING_CASE(encoder->port);
3511 
3512 	return -EINVAL;
3513 }
3514 
3515 static void icl_put_dplls(struct intel_atomic_state *state,
3516 			  struct intel_crtc *crtc)
3517 {
3518 	const struct intel_crtc_state *old_crtc_state =
3519 		intel_atomic_get_old_crtc_state(state, crtc);
3520 	struct intel_crtc_state *new_crtc_state =
3521 		intel_atomic_get_new_crtc_state(state, crtc);
3522 	enum icl_port_dpll_id id;
3523 
3524 	new_crtc_state->shared_dpll = NULL;
3525 
3526 	for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3527 		const struct icl_port_dpll *old_port_dpll =
3528 			&old_crtc_state->icl_port_dplls[id];
3529 		struct icl_port_dpll *new_port_dpll =
3530 			&new_crtc_state->icl_port_dplls[id];
3531 
3532 		new_port_dpll->pll = NULL;
3533 
3534 		if (!old_port_dpll->pll)
3535 			continue;
3536 
3537 		intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3538 	}
3539 }
3540 
/*
 * Read the MG PHY PLL register state into @dpll_hw_state, masking out
 * reserved fields so the result can be compared against the calculated
 * state. Returns true iff the PLL is enabled (and the display core power
 * domain could be acquired).
 */
static bool mg_pll_get_hw_state(struct intel_display *display,
				struct intel_shared_dpll *pll,
				struct intel_dpll_hw_state *dpll_hw_state)
{
	struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	i915_reg_t enable_reg = intel_tc_pll_enable_reg(display, pll);

	wakeref = intel_display_power_get_if_enabled(display,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(display, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	/* Each register is masked to the bits that are actually programmed. */
	hw_state->mg_refclkin_ctl = intel_de_read(display,
						  MG_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_de_read(display, MG_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_de_read(display, MG_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_pll_div0 = intel_de_read(display, MG_PLL_DIV0(tc_port));
	hw_state->mg_pll_div1 = intel_de_read(display, MG_PLL_DIV1(tc_port));
	hw_state->mg_pll_lf = intel_de_read(display, MG_PLL_LF(tc_port));
	hw_state->mg_pll_frac_lock = intel_de_read(display,
						   MG_PLL_FRAC_LOCK(tc_port));
	hw_state->mg_pll_ssc = intel_de_read(display, MG_PLL_SSC(tc_port));

	hw_state->mg_pll_bias = intel_de_read(display, MG_PLL_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias =
		intel_de_read(display, MG_PLL_TDC_COLDST_BIAS(tc_port));

	/*
	 * Mirror the refclk-dependent masks used by icl_calc_mg_pll_state()
	 * so readout and calculated state compare the same bit subsets.
	 */
	if (display->dpll.ref_clks.nssc == 38400) {
		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
		hw_state->mg_pll_bias_mask = 0;
	} else {
		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
		hw_state->mg_pll_bias_mask = -1U;
	}

	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;

	ret = true;
out:
	intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3607 
/*
 * Read the Dekel (DKL) PHY PLL register state into @dpll_hw_state, masking
 * out reserved fields for state comparison. Returns true iff the PLL is
 * enabled (and the display core power domain could be acquired).
 */
static bool dkl_pll_get_hw_state(struct intel_display *display,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *dpll_hw_state)
{
	struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(display,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(display, intel_tc_pll_enable_reg(display, pll));
	if (!(val & PLL_ENABLE))
		goto out;

	/*
	 * All registers read here have the same HIP_INDEX_REG even though
	 * they are on different building blocks
	 */
	hw_state->mg_refclkin_ctl = intel_dkl_phy_read(display,
						       DKL_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_dkl_phy_read(display, DKL_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_dkl_phy_read(display, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	/* AFC startup bits are only compared when the VBT overrides them. */
	hw_state->mg_pll_div0 = intel_dkl_phy_read(display, DKL_PLL_DIV0(tc_port));
	val = DKL_PLL_DIV0_MASK;
	if (display->vbt.override_afc_startup)
		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
	hw_state->mg_pll_div0 &= val;

	hw_state->mg_pll_div1 = intel_dkl_phy_read(display, DKL_PLL_DIV1(tc_port));
	hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
				  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);

	hw_state->mg_pll_ssc = intel_dkl_phy_read(display, DKL_PLL_SSC(tc_port));
	hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
				 DKL_PLL_SSC_STEP_LEN_MASK |
				 DKL_PLL_SSC_STEP_NUM_MASK |
				 DKL_PLL_SSC_EN);

	hw_state->mg_pll_bias = intel_dkl_phy_read(display, DKL_PLL_BIAS(tc_port));
	hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK);

	hw_state->mg_pll_tdc_coldst_bias =
		intel_dkl_phy_read(display, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
					     DKL_PLL_TDC_FEED_FWD_GAIN_MASK);

	ret = true;
out:
	intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3679 
/*
 * Common readout for combo/TBT PLLs: read the CFGCR registers from their
 * platform-specific locations. Returns true iff the PLL (gated by
 * @enable_reg) is enabled and the display core power domain could be
 * acquired.
 */
static bool icl_pll_get_hw_state(struct intel_display *display,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *dpll_hw_state,
				 i915_reg_t enable_reg)
{
	struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(display,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(display, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	/* CFGCR register locations differ per platform/display version. */
	if (display->platform.alderlake_s) {
		hw_state->cfgcr0 = intel_de_read(display, ADLS_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(display, ADLS_DPLL_CFGCR1(id));
	} else if (display->platform.dg1) {
		hw_state->cfgcr0 = intel_de_read(display, DG1_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(display, DG1_DPLL_CFGCR1(id));
	} else if (display->platform.rocketlake) {
		hw_state->cfgcr0 = intel_de_read(display,
						 RKL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(display,
						 RKL_DPLL_CFGCR1(id));
	} else if (DISPLAY_VER(display) >= 12) {
		hw_state->cfgcr0 = intel_de_read(display,
						 TGL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(display,
						 TGL_DPLL_CFGCR1(id));
		/* DIV0 only matters when the VBT overrides AFC startup. */
		if (display->vbt.override_afc_startup) {
			hw_state->div0 = intel_de_read(display, TGL_DPLL0_DIV0(id));
			hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK;
		}
	} else {
		/* JSL/EHL DPLL4 uses the CFGCR registers at index 4. */
		if ((display->platform.jasperlake || display->platform.elkhartlake) &&
		    id == DPLL_ID_EHL_DPLL4) {
			hw_state->cfgcr0 = intel_de_read(display,
							 ICL_DPLL_CFGCR0(4));
			hw_state->cfgcr1 = intel_de_read(display,
							 ICL_DPLL_CFGCR1(4));
		} else {
			hw_state->cfgcr0 = intel_de_read(display,
							 ICL_DPLL_CFGCR0(id));
			hw_state->cfgcr1 = intel_de_read(display,
							 ICL_DPLL_CFGCR1(id));
		}
	}

	ret = true;
out:
	intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3740 
3741 static bool combo_pll_get_hw_state(struct intel_display *display,
3742 				   struct intel_shared_dpll *pll,
3743 				   struct intel_dpll_hw_state *dpll_hw_state)
3744 {
3745 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(display, pll);
3746 
3747 	return icl_pll_get_hw_state(display, pll, dpll_hw_state, enable_reg);
3748 }
3749 
3750 static bool tbt_pll_get_hw_state(struct intel_display *display,
3751 				 struct intel_shared_dpll *pll,
3752 				 struct intel_dpll_hw_state *dpll_hw_state)
3753 {
3754 	return icl_pll_get_hw_state(display, pll, dpll_hw_state, TBT_PLL_ENABLE);
3755 }
3756 
/*
 * Program the combo/TBT PLL CFGCR (and, where applicable, DIV0) registers
 * from the precomputed state. Register locations mirror those used by
 * icl_pll_get_hw_state().
 */
static void icl_dpll_write(struct intel_display *display,
			   struct intel_shared_dpll *pll,
			   const struct icl_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	i915_reg_t cfgcr0_reg, cfgcr1_reg, div0_reg = INVALID_MMIO_REG;

	if (display->platform.alderlake_s) {
		cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
		cfgcr1_reg = ADLS_DPLL_CFGCR1(id);
	} else if (display->platform.dg1) {
		cfgcr0_reg = DG1_DPLL_CFGCR0(id);
		cfgcr1_reg = DG1_DPLL_CFGCR1(id);
	} else if (display->platform.rocketlake) {
		cfgcr0_reg = RKL_DPLL_CFGCR0(id);
		cfgcr1_reg = RKL_DPLL_CFGCR1(id);
	} else if (DISPLAY_VER(display) >= 12) {
		cfgcr0_reg = TGL_DPLL_CFGCR0(id);
		cfgcr1_reg = TGL_DPLL_CFGCR1(id);
		div0_reg = TGL_DPLL0_DIV0(id);
	} else {
		/* JSL/EHL DPLL4 uses the CFGCR registers at index 4. */
		if ((display->platform.jasperlake || display->platform.elkhartlake) &&
		    id == DPLL_ID_EHL_DPLL4) {
			cfgcr0_reg = ICL_DPLL_CFGCR0(4);
			cfgcr1_reg = ICL_DPLL_CFGCR1(4);
		} else {
			cfgcr0_reg = ICL_DPLL_CFGCR0(id);
			cfgcr1_reg = ICL_DPLL_CFGCR1(id);
		}
	}

	intel_de_write(display, cfgcr0_reg, hw_state->cfgcr0);
	intel_de_write(display, cfgcr1_reg, hw_state->cfgcr1);
	/* An AFC startup override requires a platform that has a DIV0 register. */
	drm_WARN_ON_ONCE(display->drm, display->vbt.override_afc_startup &&
			 !i915_mmio_reg_valid(div0_reg));
	if (display->vbt.override_afc_startup &&
	    i915_mmio_reg_valid(div0_reg))
		intel_de_rmw(display, div0_reg,
			     TGL_DPLL0_DIV0_AFC_STARTUP_MASK, hw_state->div0);
	/* Posting read to flush the writes before the PLL is enabled. */
	intel_de_posting_read(display, cfgcr1_reg);
}
3798 
/* Program the MG PHY PLL registers from the precomputed state. */
static void icl_mg_pll_write(struct intel_display *display,
			     struct intel_shared_dpll *pll,
			     const struct icl_dpll_hw_state *hw_state)
{
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);

	/*
	 * Some of the following registers have reserved fields, so program
	 * these with RMW based on a mask. The mask can be fixed or generated
	 * during the calc/readout phase if the mask depends on some other HW
	 * state like refclk, see icl_calc_mg_pll_state().
	 */
	intel_de_rmw(display, MG_REFCLKIN_CTL(tc_port),
		     MG_REFCLKIN_CTL_OD_2_MUX_MASK, hw_state->mg_refclkin_ctl);

	intel_de_rmw(display, MG_CLKTOP2_CORECLKCTL1(tc_port),
		     MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK,
		     hw_state->mg_clktop2_coreclkctl1);

	intel_de_rmw(display, MG_CLKTOP2_HSCLKCTL(tc_port),
		     MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		     MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		     MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		     MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK,
		     hw_state->mg_clktop2_hsclkctl);

	/* These registers are fully owned by the driver; plain writes. */
	intel_de_write(display, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
	intel_de_write(display, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
	intel_de_write(display, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
	intel_de_write(display, MG_PLL_FRAC_LOCK(tc_port),
		       hw_state->mg_pll_frac_lock);
	intel_de_write(display, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);

	intel_de_rmw(display, MG_PLL_BIAS(tc_port),
		     hw_state->mg_pll_bias_mask, hw_state->mg_pll_bias);

	intel_de_rmw(display, MG_PLL_TDC_COLDST_BIAS(tc_port),
		     hw_state->mg_pll_tdc_coldst_bias_mask,
		     hw_state->mg_pll_tdc_coldst_bias);

	/* Posting read to flush the programming before the PLL is enabled. */
	intel_de_posting_read(display, MG_PLL_TDC_COLDST_BIAS(tc_port));
}
3841 
/* Program the Dekel (DKL) PHY PLL registers from the precomputed state. */
static void dkl_pll_write(struct intel_display *display,
			  struct intel_shared_dpll *pll,
			  const struct icl_dpll_hw_state *hw_state)
{
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
	u32 val;

	/*
	 * All registers programmed here have the same HIP_INDEX_REG even
	 * though on different building block
	 */
	/* All the registers are RMW */
	val = intel_dkl_phy_read(display, DKL_REFCLKIN_CTL(tc_port));
	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
	val |= hw_state->mg_refclkin_ctl;
	intel_dkl_phy_write(display, DKL_REFCLKIN_CTL(tc_port), val);

	val = intel_dkl_phy_read(display, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
	val |= hw_state->mg_clktop2_coreclkctl1;
	intel_dkl_phy_write(display, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);

	val = intel_dkl_phy_read(display, DKL_CLKTOP2_HSCLKCTL(tc_port));
	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
	val |= hw_state->mg_clktop2_hsclkctl;
	intel_dkl_phy_write(display, DKL_CLKTOP2_HSCLKCTL(tc_port), val);

	/* AFC startup bits only get programmed when the VBT overrides them. */
	val = DKL_PLL_DIV0_MASK;
	if (display->vbt.override_afc_startup)
		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
	intel_dkl_phy_rmw(display, DKL_PLL_DIV0(tc_port), val,
			  hw_state->mg_pll_div0);

	val = intel_dkl_phy_read(display, DKL_PLL_DIV1(tc_port));
	val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
		 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
	val |= hw_state->mg_pll_div1;
	intel_dkl_phy_write(display, DKL_PLL_DIV1(tc_port), val);

	val = intel_dkl_phy_read(display, DKL_PLL_SSC(tc_port));
	val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
		 DKL_PLL_SSC_STEP_LEN_MASK |
		 DKL_PLL_SSC_STEP_NUM_MASK |
		 DKL_PLL_SSC_EN);
	val |= hw_state->mg_pll_ssc;
	intel_dkl_phy_write(display, DKL_PLL_SSC(tc_port), val);

	val = intel_dkl_phy_read(display, DKL_PLL_BIAS(tc_port));
	val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
		 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
	val |= hw_state->mg_pll_bias;
	intel_dkl_phy_write(display, DKL_PLL_BIAS(tc_port), val);

	val = intel_dkl_phy_read(display, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
		 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
	val |= hw_state->mg_pll_tdc_coldst_bias;
	intel_dkl_phy_write(display, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);

	/* Posting read to flush the programming before the PLL is enabled. */
	intel_dkl_phy_posting_read(display, DKL_PLL_TDC_COLDST_BIAS(tc_port));
}
3906 
/*
 * Enable power to a PLL via its enable register and wait for the
 * power state bit to report the PLL is powered up.
 */
static void icl_pll_power_enable(struct intel_display *display,
				 struct intel_shared_dpll *pll,
				 i915_reg_t enable_reg)
{
	intel_de_rmw(display, enable_reg, 0, PLL_POWER_ENABLE);

	/*
	 * The spec says we need to "wait" but it also says it should be
	 * immediate.
	 */
	if (intel_de_wait_for_set(display, enable_reg, PLL_POWER_STATE, 1))
		drm_err(display->drm, "PLL %d Power not enabled\n",
			pll->info->id);
}
3921 
/*
 * Set the PLL enable bit and wait for the PLL to report lock.
 * Lock failure is logged but not propagated to the caller.
 */
static void icl_pll_enable(struct intel_display *display,
			   struct intel_shared_dpll *pll,
			   i915_reg_t enable_reg)
{
	intel_de_rmw(display, enable_reg, 0, PLL_ENABLE);

	/* Timeout is actually 600us. */
	if (intel_de_wait_for_set(display, enable_reg, PLL_LOCK, 1))
		drm_err(display->drm, "PLL %d not locked\n", pll->info->id);
}
3932 
/*
 * Apply Wa_16011069516 (disable CMTG/DPT clock gating) on ADL-P A0
 * steppings. Only relevant while DPLL0 is being enabled; a no-op on
 * all other platforms/steppings/PLLs.
 */
static void adlp_cmtg_clock_gating_wa(struct intel_display *display, struct intel_shared_dpll *pll)
{
	u32 val;

	if (!(display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) ||
	    pll->info->id != DPLL_ID_ICL_DPLL0)
		return;
	/*
	 * Wa_16011069516:adl-p[a0]
	 *
	 * All CMTG regs are unreliable until CMTG clock gating is disabled,
	 * so we can only assume the default TRANS_CMTG_CHICKEN reg value and
	 * sanity check this assumption with a double read, which presumably
	 * returns the correct value even with clock gating on.
	 *
	 * Instead of the usual place for workarounds we apply this one here,
	 * since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled.
	 */
	val = intel_de_read(display, TRANS_CMTG_CHICKEN);
	/*
	 * NOTE: overwriting val here is intentional — intel_de_rmw()
	 * presumably returns the value it read back, giving the second
	 * read of the "double read" described above; verify against the
	 * intel_de_rmw() implementation.
	 */
	val = intel_de_rmw(display, TRANS_CMTG_CHICKEN, ~0, DISABLE_DPT_CLK_GATING);
	if (drm_WARN_ON(display->drm, val & ~DISABLE_DPT_CLK_GATING))
		drm_dbg_kms(display->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val);
}
3956 
/*
 * Enable a combo PHY DPLL: power it up, program its configuration
 * registers, then enable it and wait for lock. The step order follows
 * the hardware enable sequence and must not be changed.
 */
static void combo_pll_enable(struct intel_display *display,
			     struct intel_shared_dpll *pll,
			     const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
	i915_reg_t enable_reg = intel_combo_pll_enable_reg(display, pll);

	icl_pll_power_enable(display, pll, enable_reg);

	icl_dpll_write(display, pll, hw_state);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(display, pll, enable_reg);

	/* Workaround must run while DPLL0 is enabled; see the function. */
	adlp_cmtg_clock_gating_wa(display, pll);

	/* DVFS post sequence would be here. See the comment above. */
}
3980 
/*
 * Enable the Thunderbolt PLL: power up, program, then enable and wait
 * for lock — same sequence as combo_pll_enable() but with the fixed
 * TBT_PLL_ENABLE register.
 */
static void tbt_pll_enable(struct intel_display *display,
			   struct intel_shared_dpll *pll,
			   const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;

	icl_pll_power_enable(display, pll, TBT_PLL_ENABLE);

	icl_dpll_write(display, pll, hw_state);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(display, pll, TBT_PLL_ENABLE);

	/* DVFS post sequence would be here. See the comment above. */
}
4001 
/*
 * Enable a Type-C port PLL: power up, program the PHY registers
 * (Dekel on display version 12+, MG PHY on ICL), then enable and wait
 * for lock.
 */
static void mg_pll_enable(struct intel_display *display,
			  struct intel_shared_dpll *pll,
			  const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
	i915_reg_t enable_reg = intel_tc_pll_enable_reg(display, pll);

	icl_pll_power_enable(display, pll, enable_reg);

	if (DISPLAY_VER(display) >= 12)
		dkl_pll_write(display, pll, hw_state);
	else
		icl_mg_pll_write(display, pll, hw_state);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(display, pll, enable_reg);

	/* DVFS post sequence would be here. See the comment above. */
}
4026 
/*
 * Common ICL+ PLL disable sequence: clear the enable bit, wait for
 * lock to drop, then remove power and wait for the power state bit to
 * clear. Mirrors the enable sequence in reverse order.
 */
static void icl_pll_disable(struct intel_display *display,
			    struct intel_shared_dpll *pll,
			    i915_reg_t enable_reg)
{
	/* The first steps are done by intel_ddi_post_disable(). */

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	intel_de_rmw(display, enable_reg, PLL_ENABLE, 0);

	/* Timeout is actually 1us. */
	if (intel_de_wait_for_clear(display, enable_reg, PLL_LOCK, 1))
		drm_err(display->drm, "PLL %d locked\n", pll->info->id);

	/* DVFS post sequence would be here. See the comment above. */

	intel_de_rmw(display, enable_reg, PLL_POWER_ENABLE, 0);

	/*
	 * The spec says we need to "wait" but it also says it should be
	 * immediate.
	 */
	if (intel_de_wait_for_clear(display, enable_reg, PLL_POWER_STATE, 1))
		drm_err(display->drm, "PLL %d Power not disabled\n",
			pll->info->id);
}
4057 
/*
 * Disable a combo PHY DPLL by running the common ICL disable sequence
 * on its per-PLL enable register.
 */
static void combo_pll_disable(struct intel_display *display,
			      struct intel_shared_dpll *pll)
{
	icl_pll_disable(display, pll,
			intel_combo_pll_enable_reg(display, pll));
}
4065 
/* Disable the Thunderbolt PLL via the fixed TBT_PLL_ENABLE register. */
static void tbt_pll_disable(struct intel_display *display,
			    struct intel_shared_dpll *pll)
{
	icl_pll_disable(display, pll, TBT_PLL_ENABLE);
}
4071 
/*
 * Disable a Type-C port PLL by running the common ICL disable
 * sequence on its per-PLL enable register.
 */
static void mg_pll_disable(struct intel_display *display,
			   struct intel_shared_dpll *pll)
{
	icl_pll_disable(display, pll,
			intel_tc_pll_enable_reg(display, pll));
}
4079 
/* ICL+ DPLLs use the (non-SSC) CDCLK reference clock. */
static void icl_update_dpll_ref_clks(struct intel_display *display)
{
	/* No SSC ref */
	display->dpll.ref_clks.nssc = display->cdclk.hw.ref;
}
4085 
4086 static void icl_dump_hw_state(struct drm_printer *p,
4087 			      const struct intel_dpll_hw_state *dpll_hw_state)
4088 {
4089 	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
4090 
4091 	drm_printf(p, "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, "
4092 		   "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
4093 		   "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
4094 		   "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
4095 		   "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
4096 		   "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
4097 		   hw_state->cfgcr0, hw_state->cfgcr1, hw_state->div0,
4098 		   hw_state->mg_refclkin_ctl,
4099 		   hw_state->mg_clktop2_coreclkctl1,
4100 		   hw_state->mg_clktop2_hsclkctl,
4101 		   hw_state->mg_pll_div0,
4102 		   hw_state->mg_pll_div1,
4103 		   hw_state->mg_pll_lf,
4104 		   hw_state->mg_pll_frac_lock,
4105 		   hw_state->mg_pll_ssc,
4106 		   hw_state->mg_pll_bias,
4107 		   hw_state->mg_pll_tdc_coldst_bias);
4108 }
4109 
4110 static bool icl_compare_hw_state(const struct intel_dpll_hw_state *_a,
4111 				 const struct intel_dpll_hw_state *_b)
4112 {
4113 	const struct icl_dpll_hw_state *a = &_a->icl;
4114 	const struct icl_dpll_hw_state *b = &_b->icl;
4115 
4116 	/* FIXME split combo vs. mg more thoroughly */
4117 	return a->cfgcr0 == b->cfgcr0 &&
4118 		a->cfgcr1 == b->cfgcr1 &&
4119 		a->div0 == b->div0 &&
4120 		a->mg_refclkin_ctl == b->mg_refclkin_ctl &&
4121 		a->mg_clktop2_coreclkctl1 == b->mg_clktop2_coreclkctl1 &&
4122 		a->mg_clktop2_hsclkctl == b->mg_clktop2_hsclkctl &&
4123 		a->mg_pll_div0 == b->mg_pll_div0 &&
4124 		a->mg_pll_div1 == b->mg_pll_div1 &&
4125 		a->mg_pll_lf == b->mg_pll_lf &&
4126 		a->mg_pll_frac_lock == b->mg_pll_frac_lock &&
4127 		a->mg_pll_ssc == b->mg_pll_ssc &&
4128 		a->mg_pll_bias == b->mg_pll_bias &&
4129 		a->mg_pll_tdc_coldst_bias == b->mg_pll_tdc_coldst_bias;
4130 }
4131 
/* Ops for ICL+ combo PHY DPLLs. */
static const struct intel_shared_dpll_funcs combo_pll_funcs = {
	.enable = combo_pll_enable,
	.disable = combo_pll_disable,
	.get_hw_state = combo_pll_get_hw_state,
	.get_freq = icl_ddi_combo_pll_get_freq,
};

/* Ops for the ICL+ Thunderbolt PLL. */
static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
	.enable = tbt_pll_enable,
	.disable = tbt_pll_disable,
	.get_hw_state = tbt_pll_get_hw_state,
	.get_freq = icl_ddi_tbt_pll_get_freq,
};

/* Ops for ICL MG PHY (Type-C) PLLs. */
static const struct intel_shared_dpll_funcs mg_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = mg_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};
4152 
/* Ice Lake: two combo DPLLs, TBT PLL and four MG (Type-C) PLLs. */
static const struct dpll_info icl_plls[] = {
	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
	{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
	  .is_alt_port_dpll = true, },
	{ .name = "MG PLL 1", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
	{ .name = "MG PLL 2", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
	{ .name = "MG PLL 3", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
	{ .name = "MG PLL 4", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
	{}
};

static const struct intel_dpll_mgr icl_pll_mgr = {
	.dpll_info = icl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
	.compare_hw_state = icl_compare_hw_state,
};
4175 
/* Elkhart/Jasper Lake: combo DPLLs only; DPLL4 needs DC_OFF power. */
static const struct dpll_info ehl_plls[] = {
	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
	{ .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4,
	  .power_domain = POWER_DOMAIN_DC_OFF, },
	{}
};

static const struct intel_dpll_mgr ehl_pll_mgr = {
	.dpll_info = ehl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
	.compare_hw_state = icl_compare_hw_state,
};
4193 
/* Ops for Dekel PHY (Type-C) PLLs, display version 12+. */
static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = dkl_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};

/* Tiger Lake: two combo DPLLs, TBT PLL and six Dekel TC PLLs. */
static const struct dpll_info tgl_plls[] = {
	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
	{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
	  .is_alt_port_dpll = true, },
	{ .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
	{ .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
	{ .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
	{ .name = "TC PLL 4", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
	{ .name = "TC PLL 5", .funcs = &dkl_pll_funcs, .id = DPLL_ID_TGL_MGPLL5, },
	{ .name = "TC PLL 6", .funcs = &dkl_pll_funcs, .id = DPLL_ID_TGL_MGPLL6, },
	{}
};

static const struct intel_dpll_mgr tgl_pll_mgr = {
	.dpll_info = tgl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
	.compare_hw_state = icl_compare_hw_state,
};
4225 
/* Rocket Lake: three combo DPLLs, no Type-C. */
static const struct dpll_info rkl_plls[] = {
	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
	{ .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4, },
	{}
};

static const struct intel_dpll_mgr rkl_pll_mgr = {
	.dpll_info = rkl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
	.compare_hw_state = icl_compare_hw_state,
};
4242 
/* DG1: four combo DPLLs, no Type-C. */
static const struct dpll_info dg1_plls[] = {
	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL0, },
	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL1, },
	{ .name = "DPLL 2", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL2, },
	{ .name = "DPLL 3", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL3, },
	{}
};

static const struct intel_dpll_mgr dg1_pll_mgr = {
	.dpll_info = dg1_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
	.compare_hw_state = icl_compare_hw_state,
};
4260 
/* Alder Lake S: four combo DPLLs, no Type-C. */
static const struct dpll_info adls_plls[] = {
	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
	{ .name = "DPLL 2", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL2, },
	{ .name = "DPLL 3", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL3, },
	{}
};

static const struct intel_dpll_mgr adls_pll_mgr = {
	.dpll_info = adls_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
	.compare_hw_state = icl_compare_hw_state,
};
4278 
/* Alder Lake P: two combo DPLLs, TBT PLL and four Dekel TC PLLs. */
static const struct dpll_info adlp_plls[] = {
	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
	{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
	  .is_alt_port_dpll = true, },
	{ .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
	{ .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
	{ .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
	{ .name = "TC PLL 4", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
	{}
};

static const struct intel_dpll_mgr adlp_pll_mgr = {
	.dpll_info = adlp_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
	.compare_hw_state = icl_compare_hw_state,
};
4301 
4302 /**
4303  * intel_shared_dpll_init - Initialize shared DPLLs
4304  * @display: intel_display device
4305  *
4306  * Initialize shared DPLLs for @display.
4307  */
4308 void intel_shared_dpll_init(struct intel_display *display)
4309 {
4310 	const struct intel_dpll_mgr *dpll_mgr = NULL;
4311 	const struct dpll_info *dpll_info;
4312 	int i;
4313 
4314 	mutex_init(&display->dpll.lock);
4315 
4316 	if (DISPLAY_VER(display) >= 14 || display->platform.dg2)
4317 		/* No shared DPLLs on DG2; port PLLs are part of the PHY */
4318 		dpll_mgr = NULL;
4319 	else if (display->platform.alderlake_p)
4320 		dpll_mgr = &adlp_pll_mgr;
4321 	else if (display->platform.alderlake_s)
4322 		dpll_mgr = &adls_pll_mgr;
4323 	else if (display->platform.dg1)
4324 		dpll_mgr = &dg1_pll_mgr;
4325 	else if (display->platform.rocketlake)
4326 		dpll_mgr = &rkl_pll_mgr;
4327 	else if (DISPLAY_VER(display) >= 12)
4328 		dpll_mgr = &tgl_pll_mgr;
4329 	else if (display->platform.jasperlake || display->platform.elkhartlake)
4330 		dpll_mgr = &ehl_pll_mgr;
4331 	else if (DISPLAY_VER(display) >= 11)
4332 		dpll_mgr = &icl_pll_mgr;
4333 	else if (display->platform.geminilake || display->platform.broxton)
4334 		dpll_mgr = &bxt_pll_mgr;
4335 	else if (DISPLAY_VER(display) == 9)
4336 		dpll_mgr = &skl_pll_mgr;
4337 	else if (HAS_DDI(display))
4338 		dpll_mgr = &hsw_pll_mgr;
4339 	else if (HAS_PCH_IBX(display) || HAS_PCH_CPT(display))
4340 		dpll_mgr = &pch_pll_mgr;
4341 
4342 	if (!dpll_mgr)
4343 		return;
4344 
4345 	dpll_info = dpll_mgr->dpll_info;
4346 
4347 	for (i = 0; dpll_info[i].name; i++) {
4348 		if (drm_WARN_ON(display->drm,
4349 				i >= ARRAY_SIZE(display->dpll.shared_dplls)))
4350 			break;
4351 
4352 		/* must fit into unsigned long bitmask on 32bit */
4353 		if (drm_WARN_ON(display->drm, dpll_info[i].id >= 32))
4354 			break;
4355 
4356 		display->dpll.shared_dplls[i].info = &dpll_info[i];
4357 		display->dpll.shared_dplls[i].index = i;
4358 	}
4359 
4360 	display->dpll.mgr = dpll_mgr;
4361 	display->dpll.num_shared_dpll = i;
4362 }
4363 
4364 /**
4365  * intel_compute_shared_dplls - compute DPLL state CRTC and encoder combination
4366  * @state: atomic state
4367  * @crtc: CRTC to compute DPLLs for
4368  * @encoder: encoder
4369  *
4370  * This function computes the DPLL state for the given CRTC and encoder.
4371  *
4372  * The new configuration in the atomic commit @state is made effective by
4373  * calling intel_shared_dpll_swap_state().
4374  *
4375  * Returns:
4376  * 0 on success, negative error code on failure.
4377  */
4378 int intel_compute_shared_dplls(struct intel_atomic_state *state,
4379 			       struct intel_crtc *crtc,
4380 			       struct intel_encoder *encoder)
4381 {
4382 	struct intel_display *display = to_intel_display(state);
4383 	const struct intel_dpll_mgr *dpll_mgr = display->dpll.mgr;
4384 
4385 	if (drm_WARN_ON(display->drm, !dpll_mgr))
4386 		return -EINVAL;
4387 
4388 	return dpll_mgr->compute_dplls(state, crtc, encoder);
4389 }
4390 
4391 /**
4392  * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
4393  * @state: atomic state
4394  * @crtc: CRTC to reserve DPLLs for
4395  * @encoder: encoder
4396  *
4397  * This function reserves all required DPLLs for the given CRTC and encoder
4398  * combination in the current atomic commit @state and the new @crtc atomic
4399  * state.
4400  *
4401  * The new configuration in the atomic commit @state is made effective by
4402  * calling intel_shared_dpll_swap_state().
4403  *
4404  * The reserved DPLLs should be released by calling
4405  * intel_release_shared_dplls().
4406  *
4407  * Returns:
4408  * 0 if all required DPLLs were successfully reserved,
4409  * negative error code otherwise.
4410  */
4411 int intel_reserve_shared_dplls(struct intel_atomic_state *state,
4412 			       struct intel_crtc *crtc,
4413 			       struct intel_encoder *encoder)
4414 {
4415 	struct intel_display *display = to_intel_display(state);
4416 	const struct intel_dpll_mgr *dpll_mgr = display->dpll.mgr;
4417 
4418 	if (drm_WARN_ON(display->drm, !dpll_mgr))
4419 		return -EINVAL;
4420 
4421 	return dpll_mgr->get_dplls(state, crtc, encoder);
4422 }
4423 
4424 /**
4425  * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
4426  * @state: atomic state
4427  * @crtc: crtc from which the DPLLs are to be released
4428  *
4429  * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
4430  * from the current atomic commit @state and the old @crtc atomic state.
4431  *
4432  * The new configuration in the atomic commit @state is made effective by
4433  * calling intel_shared_dpll_swap_state().
4434  */
4435 void intel_release_shared_dplls(struct intel_atomic_state *state,
4436 				struct intel_crtc *crtc)
4437 {
4438 	struct intel_display *display = to_intel_display(state);
4439 	const struct intel_dpll_mgr *dpll_mgr = display->dpll.mgr;
4440 
4441 	/*
4442 	 * FIXME: this function is called for every platform having a
4443 	 * compute_clock hook, even though the platform doesn't yet support
4444 	 * the shared DPLL framework and intel_reserve_shared_dplls() is not
4445 	 * called on those.
4446 	 */
4447 	if (!dpll_mgr)
4448 		return;
4449 
4450 	dpll_mgr->put_dplls(state, crtc);
4451 }
4452 
4453 /**
4454  * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
4455  * @state: atomic state
4456  * @crtc: the CRTC for which to update the active DPLL
4457  * @encoder: encoder determining the type of port DPLL
4458  *
4459  * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
4460  * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
4461  * DPLL selected will be based on the current mode of the encoder's port.
4462  */
4463 void intel_update_active_dpll(struct intel_atomic_state *state,
4464 			      struct intel_crtc *crtc,
4465 			      struct intel_encoder *encoder)
4466 {
4467 	struct intel_display *display = to_intel_display(encoder);
4468 	const struct intel_dpll_mgr *dpll_mgr = display->dpll.mgr;
4469 
4470 	if (drm_WARN_ON(display->drm, !dpll_mgr))
4471 		return;
4472 
4473 	dpll_mgr->update_active_dpll(state, crtc, encoder);
4474 }
4475 
4476 /**
4477  * intel_dpll_get_freq - calculate the DPLL's output frequency
4478  * @display: intel_display device
4479  * @pll: DPLL for which to calculate the output frequency
4480  * @dpll_hw_state: DPLL state from which to calculate the output frequency
4481  *
4482  * Return the output frequency corresponding to @pll's passed in @dpll_hw_state.
4483  */
4484 int intel_dpll_get_freq(struct intel_display *display,
4485 			const struct intel_shared_dpll *pll,
4486 			const struct intel_dpll_hw_state *dpll_hw_state)
4487 {
4488 	if (drm_WARN_ON(display->drm, !pll->info->funcs->get_freq))
4489 		return 0;
4490 
4491 	return pll->info->funcs->get_freq(display, pll, dpll_hw_state);
4492 }
4493 
4494 /**
4495  * intel_dpll_get_hw_state - readout the DPLL's hardware state
4496  * @display: intel_display device instance
4497  * @pll: DPLL for which to calculate the output frequency
4498  * @dpll_hw_state: DPLL's hardware state
4499  *
4500  * Read out @pll's hardware state into @dpll_hw_state.
4501  */
4502 bool intel_dpll_get_hw_state(struct intel_display *display,
4503 			     struct intel_shared_dpll *pll,
4504 			     struct intel_dpll_hw_state *dpll_hw_state)
4505 {
4506 	return pll->info->funcs->get_hw_state(display, pll, dpll_hw_state);
4507 }
4508 
/*
 * Read out a PLL's hardware state and rebuild the software tracking:
 * on/off status, power reference for always-powered PLLs, and the
 * masks of pipes currently referencing/using it.
 */
static void readout_dpll_hw_state(struct intel_display *display,
				  struct intel_shared_dpll *pll)
{
	struct intel_crtc *crtc;

	pll->on = intel_dpll_get_hw_state(display, pll, &pll->state.hw_state);

	/* Keep the power domain held while the PLL is on, if it needs one. */
	if (pll->on && pll->info->power_domain)
		pll->wakeref = intel_display_power_get(display, pll->info->power_domain);

	/* Rebuild pipe_mask from the CRTCs that are using this PLL. */
	pll->state.pipe_mask = 0;
	for_each_intel_crtc(display->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
			intel_reference_shared_dpll_crtc(crtc, pll, &pll->state);
	}
	pll->active_mask = pll->state.pipe_mask;

	drm_dbg_kms(display->drm,
		    "%s hw state readout: pipe_mask 0x%x, on %i\n",
		    pll->info->name, pll->state.pipe_mask, pll->on);
}
4533 
4534 void intel_dpll_update_ref_clks(struct intel_display *display)
4535 {
4536 	if (display->dpll.mgr && display->dpll.mgr->update_ref_clks)
4537 		display->dpll.mgr->update_ref_clks(display);
4538 }
4539 
/* Read out the hardware state of every shared DPLL on @display. */
void intel_dpll_readout_hw_state(struct intel_display *display)
{
	struct intel_shared_dpll *pll;
	int i;

	for_each_shared_dpll(display, pll, i)
		readout_dpll_hw_state(display, pll);
}
4548 
/*
 * Bring a PLL's hardware state in line with software expectations at
 * driver load: apply the ADL-P CMTG workaround while the PLL is still
 * on, and disable PLLs left enabled by firmware that no pipe uses.
 */
static void sanitize_dpll_state(struct intel_display *display,
				struct intel_shared_dpll *pll)
{
	if (!pll->on)
		return;

	/* Must run before a potential disable: needs the PLL enabled. */
	adlp_cmtg_clock_gating_wa(display, pll);

	if (pll->active_mask)
		return;

	drm_dbg_kms(display->drm,
		    "%s enabled but not in use, disabling\n",
		    pll->info->name);

	_intel_disable_shared_dpll(display, pll);
}
4566 
/* Sanitize the state of all shared DPLLs after hardware readout. */
void intel_dpll_sanitize_state(struct intel_display *display)
{
	struct intel_shared_dpll *pll;
	int i;

	intel_cx0_pll_power_save_wa(display);

	for_each_shared_dpll(display, pll, i)
		sanitize_dpll_state(display, pll);
}
4577 
4578 /**
4579  * intel_dpll_dump_hw_state - dump hw_state
4580  * @display: intel_display structure
4581  * @p: where to print the state to
4582  * @dpll_hw_state: hw state to be dumped
4583  *
4584  * Dumo out the relevant values in @dpll_hw_state.
4585  */
4586 void intel_dpll_dump_hw_state(struct intel_display *display,
4587 			      struct drm_printer *p,
4588 			      const struct intel_dpll_hw_state *dpll_hw_state)
4589 {
4590 	if (display->dpll.mgr) {
4591 		display->dpll.mgr->dump_hw_state(p, dpll_hw_state);
4592 	} else {
4593 		/* fallback for platforms that don't use the shared dpll
4594 		 * infrastructure
4595 		 */
4596 		ibx_dump_hw_state(p, dpll_hw_state);
4597 	}
4598 }
4599 
4600 /**
4601  * intel_dpll_compare_hw_state - compare the two states
4602  * @display: intel_display structure
4603  * @a: first DPLL hw state
4604  * @b: second DPLL hw state
4605  *
4606  * Compare DPLL hw states @a and @b.
4607  *
4608  * Returns: true if the states are equal, false if the differ
4609  */
4610 bool intel_dpll_compare_hw_state(struct intel_display *display,
4611 				 const struct intel_dpll_hw_state *a,
4612 				 const struct intel_dpll_hw_state *b)
4613 {
4614 	if (display->dpll.mgr) {
4615 		return display->dpll.mgr->compare_hw_state(a, b);
4616 	} else {
4617 		/* fallback for platforms that don't use the shared dpll
4618 		 * infrastructure
4619 		 */
4620 		return ibx_compare_hw_state(a, b);
4621 	}
4622 }
4623 
/*
 * Cross-check one PLL's software tracking against its hardware state.
 *
 * With @crtc == NULL only the PLL's global bookkeeping is verified
 * (active_mask must be a subset of pipe_mask); otherwise @crtc's
 * membership in the PLL's active/enabled pipe masks is checked against
 * @new_crtc_state, and the cached hw state is compared with a fresh
 * readout.
 */
static void
verify_single_dpll_state(struct intel_display *display,
			 struct intel_shared_dpll *pll,
			 struct intel_crtc *crtc,
			 const struct intel_crtc_state *new_crtc_state)
{
	struct intel_dpll_hw_state dpll_hw_state = {};
	u8 pipe_mask;
	bool active;

	active = intel_dpll_get_hw_state(display, pll, &dpll_hw_state);

	/* Always-on PLLs are exempt from on/off consistency checks. */
	if (!pll->info->always_on) {
		INTEL_DISPLAY_STATE_WARN(display, !pll->on && pll->active_mask,
					 "%s: pll in active use but not on in sw tracking\n",
					 pll->info->name);
		INTEL_DISPLAY_STATE_WARN(display, pll->on && !pll->active_mask,
					 "%s: pll is on but not used by any active pipe\n",
					 pll->info->name);
		INTEL_DISPLAY_STATE_WARN(display, pll->on != active,
					 "%s: pll on state mismatch (expected %i, found %i)\n",
					 pll->info->name, pll->on, active);
	}

	if (!crtc) {
		INTEL_DISPLAY_STATE_WARN(display,
					 pll->active_mask & ~pll->state.pipe_mask,
					 "%s: more active pll users than references: 0x%x vs 0x%x\n",
					 pll->info->name, pll->active_mask, pll->state.pipe_mask);

		return;
	}

	pipe_mask = BIT(crtc->pipe);

	/* An active pipe must be in active_mask, an inactive one must not. */
	if (new_crtc_state->hw.active)
		INTEL_DISPLAY_STATE_WARN(display, !(pll->active_mask & pipe_mask),
					 "%s: pll active mismatch (expected pipe %c in active mask 0x%x)\n",
					 pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
	else
		INTEL_DISPLAY_STATE_WARN(display, pll->active_mask & pipe_mask,
					 "%s: pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
					 pll->info->name, pipe_name(crtc->pipe), pll->active_mask);

	INTEL_DISPLAY_STATE_WARN(display, !(pll->state.pipe_mask & pipe_mask),
				 "%s: pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
				 pll->info->name, pipe_mask, pll->state.pipe_mask);

	/* The cached hw state must match a fresh readout while the PLL is on. */
	INTEL_DISPLAY_STATE_WARN(display,
				 pll->on && memcmp(&pll->state.hw_state, &dpll_hw_state,
						   sizeof(dpll_hw_state)),
				 "%s: pll hw state mismatch\n",
				 pll->info->name);
}
4678 
4679 static bool has_alt_port_dpll(const struct intel_shared_dpll *old_pll,
4680 			      const struct intel_shared_dpll *new_pll)
4681 {
4682 	return old_pll && new_pll && old_pll != new_pll &&
4683 		(old_pll->info->is_alt_port_dpll || new_pll->info->is_alt_port_dpll);
4684 }
4685 
/*
 * Verify @crtc's shared DPLL bookkeeping after a commit: check the new
 * PLL's state, and when the CRTC switched PLLs verify the old PLL no
 * longer counts this pipe as active (nor as a reference, except for
 * the MG/TC <-> TBT alternate-PLL case).
 */
void intel_shared_dpll_state_verify(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (new_crtc_state->shared_dpll)
		verify_single_dpll_state(display, new_crtc_state->shared_dpll,
					 crtc, new_crtc_state);

	if (old_crtc_state->shared_dpll &&
	    old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
		u8 pipe_mask = BIT(crtc->pipe);
		struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;

		INTEL_DISPLAY_STATE_WARN(display, pll->active_mask & pipe_mask,
					 "%s: pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
					 pll->info->name, pipe_name(crtc->pipe), pll->active_mask);

		/* TC ports have both MG/TC and TBT PLL referenced simultaneously */
		INTEL_DISPLAY_STATE_WARN(display, !has_alt_port_dpll(old_crtc_state->shared_dpll,
								     new_crtc_state->shared_dpll) &&
					 pll->state.pipe_mask & pipe_mask,
					 "%s: pll enabled crtcs mismatch (found pipe %c in enabled mask (0x%x))\n",
					 pll->info->name, pipe_name(crtc->pipe), pll->state.pipe_mask);
	}
}
4716 
/*
 * Verify the global (non-CRTC) consistency of every shared DPLL's
 * software tracking against hardware.
 */
void intel_shared_dpll_verify_disabled(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_shared_dpll *pll;
	int i;

	for_each_shared_dpll(display, pll, i)
		verify_single_dpll_state(display, pll, NULL, NULL);
}
4726