xref: /linux/drivers/gpu/drm/i915/display/intel_dpll_mgr.c (revision a4871e6201c46c8e1d04308265b4b4c5753c8209)
1 /*
2  * Copyright © 2006-2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <linux/math.h>
25 #include <linux/string_helpers.h>
26 
27 #include "bxt_dpio_phy_regs.h"
28 #include "i915_drv.h"
29 #include "i915_reg.h"
30 #include "intel_cx0_phy.h"
31 #include "intel_de.h"
32 #include "intel_display_types.h"
33 #include "intel_dkl_phy.h"
34 #include "intel_dkl_phy_regs.h"
35 #include "intel_dpio_phy.h"
36 #include "intel_dpll.h"
37 #include "intel_dpll_mgr.h"
38 #include "intel_hti.h"
39 #include "intel_mg_phy_regs.h"
40 #include "intel_pch_refclk.h"
41 #include "intel_tc.h"
42 
43 /**
44  * DOC: Display PLLs
45  *
46  * Display PLLs used for driving outputs vary by platform. While some have
47  * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
48  * from a pool. In the latter scenario, it is possible that multiple pipes
49  * share a PLL if their configurations match.
50  *
51  * This file provides an abstraction over display PLLs. The function
52  * intel_shared_dpll_init() initializes the PLLs for the given platform.  The
53  * users of a PLL are tracked and that tracking is integrated with the atomic
 * modeset interface. During an atomic operation, required PLLs can be reserved
55  * for a given CRTC and encoder configuration by calling
56  * intel_reserve_shared_dplls() and previously reserved PLLs can be released
57  * with intel_release_shared_dplls().
58  * Changes to the users are first staged in the atomic state, and then made
59  * effective by calling intel_shared_dpll_swap_state() during the atomic
60  * commit phase.
61  */
62 
63 /* platform specific hooks for managing DPLLs */
64 struct intel_shared_dpll_funcs {
65 	/*
66 	 * Hook for enabling the pll, called from intel_enable_shared_dpll() if
67 	 * the pll is not already enabled.
68 	 */
69 	void (*enable)(struct intel_display *display,
70 		       struct intel_shared_dpll *pll,
71 		       const struct intel_dpll_hw_state *dpll_hw_state);
72 
73 	/*
74 	 * Hook for disabling the pll, called from intel_disable_shared_dpll()
75 	 * only when it is safe to disable the pll, i.e., there are no more
76 	 * tracked users for it.
77 	 */
78 	void (*disable)(struct intel_display *display,
79 			struct intel_shared_dpll *pll);
80 
81 	/*
82 	 * Hook for reading the values currently programmed to the DPLL
83 	 * registers. This is used for initial hw state readout and state
84 	 * verification after a mode set.
85 	 */
86 	bool (*get_hw_state)(struct intel_display *display,
87 			     struct intel_shared_dpll *pll,
88 			     struct intel_dpll_hw_state *dpll_hw_state);
89 
90 	/*
91 	 * Hook for calculating the pll's output frequency based on its passed
92 	 * in state.
93 	 */
94 	int (*get_freq)(struct intel_display *i915,
95 			const struct intel_shared_dpll *pll,
96 			const struct intel_dpll_hw_state *dpll_hw_state);
97 };
98 
struct intel_dpll_mgr {
	/* platform's PLL table, terminated by an empty entry */
	const struct dpll_info *dpll_info;

	/* compute the PLL state for @crtc driven by @encoder */
	int (*compute_dplls)(struct intel_atomic_state *state,
			     struct intel_crtc *crtc,
			     struct intel_encoder *encoder);
	/* reserve the PLL(s) needed by @crtc in the atomic state */
	int (*get_dplls)(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder);
	/* release the PLL(s) previously reserved for @crtc */
	void (*put_dplls)(struct intel_atomic_state *state,
			  struct intel_crtc *crtc);
	/* optional: switch which reserved PLL @crtc actively uses */
	void (*update_active_dpll)(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_encoder *encoder);
	/* optional: update the cached display->dpll.ref_clks values */
	void (*update_ref_clks)(struct intel_display *display);
	/* print a dpll_hw_state to @p for debugging */
	void (*dump_hw_state)(struct drm_printer *p,
			      const struct intel_dpll_hw_state *dpll_hw_state);
	/* compare two dpll_hw_states, used for state verification */
	bool (*compare_hw_state)(const struct intel_dpll_hw_state *a,
				 const struct intel_dpll_hw_state *b);
};
119 
/*
 * Snapshot the current state of every shared DPLL into @shared_dpll,
 * indexed by pll->index.
 */
static void
intel_atomic_duplicate_dpll_state(struct intel_display *display,
				  struct intel_shared_dpll_state *shared_dpll)
{
	struct intel_shared_dpll *pll;
	int i;

	/* Copy shared dpll state */
	for_each_shared_dpll(display, pll, i)
		shared_dpll[pll->index] = pll->state;
}
131 
/*
 * Return the per-PLL state array of atomic state @s, lazily copying the
 * current state of all PLLs into it on first access.  Callers must hold
 * connection_mutex (enforced by the WARN below).
 */
static struct intel_shared_dpll_state *
intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
{
	struct intel_atomic_state *state = to_intel_atomic_state(s);
	struct intel_display *display = to_intel_display(state);

	drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));

	if (!state->dpll_set) {
		state->dpll_set = true;

		/* first access: duplicate the current state of every PLL */
		intel_atomic_duplicate_dpll_state(display,
						  state->shared_dpll);
	}

	return state->shared_dpll;
}
149 
150 /**
151  * intel_get_shared_dpll_by_id - get a DPLL given its id
152  * @display: intel_display device instance
153  * @id: pll id
154  *
155  * Returns:
156  * A pointer to the DPLL with @id
157  */
158 struct intel_shared_dpll *
159 intel_get_shared_dpll_by_id(struct intel_display *display,
160 			    enum intel_dpll_id id)
161 {
162 	struct intel_shared_dpll *pll;
163 	int i;
164 
165 	for_each_shared_dpll(display, pll, i) {
166 		if (pll->info->id == id)
167 			return pll;
168 	}
169 
170 	MISSING_CASE(id);
171 	return NULL;
172 }
173 
/* For ILK+ */
/*
 * Assert that @pll's hardware enable state matches the expected @state,
 * warning on mismatch.  Used by modeset state checking.
 */
void assert_shared_dpll(struct intel_display *display,
			struct intel_shared_dpll *pll,
			bool state)
{
	bool cur_state;
	struct intel_dpll_hw_state hw_state;

	/* Asserting on a NULL PLL is a caller bug; warn and bail. */
	if (drm_WARN(display->drm, !pll,
		     "asserting DPLL %s with no DPLL\n", str_on_off(state)))
		return;

	/* Read back the actual hardware enable state and compare. */
	cur_state = intel_dpll_get_hw_state(display, pll, &hw_state);
	INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
				 "%s assertion failure (expected %s, current %s)\n",
				 pll->info->name, str_on_off(state),
				 str_on_off(cur_state));
}
192 
193 static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
194 {
195 	return TC_PORT_1 + id - DPLL_ID_ICL_MGPLL1;
196 }
197 
198 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
199 {
200 	return tc_port - TC_PORT_1 + DPLL_ID_ICL_MGPLL1;
201 }
202 
203 static i915_reg_t
204 intel_combo_pll_enable_reg(struct intel_display *display,
205 			   struct intel_shared_dpll *pll)
206 {
207 	if (display->platform.dg1)
208 		return DG1_DPLL_ENABLE(pll->info->id);
209 	else if ((display->platform.jasperlake || display->platform.elkhartlake) &&
210 		 (pll->info->id == DPLL_ID_EHL_DPLL4))
211 		return MG_PLL_ENABLE(0);
212 
213 	return ICL_DPLL_ENABLE(pll->info->id);
214 }
215 
216 static i915_reg_t
217 intel_tc_pll_enable_reg(struct intel_display *display,
218 			struct intel_shared_dpll *pll)
219 {
220 	const enum intel_dpll_id id = pll->info->id;
221 	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
222 
223 	if (display->platform.alderlake_p)
224 		return ADLP_PORTTC_PLL_ENABLE(tc_port);
225 
226 	return MG_PLL_ENABLE(tc_port);
227 }
228 
/*
 * Enable @pll in hardware using the PLL's cached hw state.  The PLL's
 * power domain (if any) is acquired first and held until disable.
 * Called with display->dpll.lock held.
 */
static void _intel_enable_shared_dpll(struct intel_display *display,
				      struct intel_shared_dpll *pll)
{
	/* Hold a power domain reference for as long as the PLL is on. */
	if (pll->info->power_domain)
		pll->wakeref = intel_display_power_get(display, pll->info->power_domain);

	pll->info->funcs->enable(display, pll, &pll->state.hw_state);
	pll->on = true;
}
238 
/*
 * Disable @pll in hardware, then release the power domain reference
 * taken in _intel_enable_shared_dpll().  Called with
 * display->dpll.lock held.
 */
static void _intel_disable_shared_dpll(struct intel_display *display,
				       struct intel_shared_dpll *pll)
{
	pll->info->funcs->disable(display, pll);
	pll->on = false;

	/* Drop the power domain reference only after the PLL is off. */
	if (pll->info->power_domain)
		intel_display_power_put(display, pll->info->power_domain, pll->wakeref);
}
248 
/**
 * intel_enable_shared_dpll - enable a CRTC's shared DPLL
 * @crtc_state: CRTC, and its state, which has a shared DPLL
 *
 * Enable the shared DPLL used by @crtc.
 */
void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
	unsigned int pipe_mask = intel_crtc_joined_pipe_mask(crtc_state);
	unsigned int old_mask;

	if (drm_WARN_ON(display->drm, !pll))
		return;

	mutex_lock(&display->dpll.lock);
	old_mask = pll->active_mask;

	/* The pipes must hold a reference and must not be active yet. */
	if (drm_WARN_ON(display->drm, !(pll->state.pipe_mask & pipe_mask)) ||
	    drm_WARN_ON(display->drm, pll->active_mask & pipe_mask))
		goto out;

	pll->active_mask |= pipe_mask;

	drm_dbg_kms(display->drm,
		    "enable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
		    pll->info->name, pll->active_mask, pll->on,
		    crtc->base.base.id, crtc->base.name);

	/*
	 * If another pipe already had the PLL active it must be on in
	 * hardware; only the first user actually enables it.
	 */
	if (old_mask) {
		drm_WARN_ON(display->drm, !pll->on);
		assert_shared_dpll_enabled(display, pll);
		goto out;
	}
	drm_WARN_ON(display->drm, pll->on);

	drm_dbg_kms(display->drm, "enabling %s\n", pll->info->name);

	_intel_enable_shared_dpll(display, pll);

out:
	mutex_unlock(&display->dpll.lock);
}
294 
/**
 * intel_disable_shared_dpll - disable a CRTC's shared DPLL
 * @crtc_state: CRTC, and its state, which has a shared DPLL
 *
 * Disable the shared DPLL used by @crtc.
 */
void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
	unsigned int pipe_mask = intel_crtc_joined_pipe_mask(crtc_state);

	/* PCH only available on ILK+ */
	if (DISPLAY_VER(display) < 5)
		return;

	if (pll == NULL)
		return;

	mutex_lock(&display->dpll.lock);
	/* A pipe that never activated the PLL must not try to disable it. */
	if (drm_WARN(display->drm, !(pll->active_mask & pipe_mask),
		     "%s not used by [CRTC:%d:%s]\n", pll->info->name,
		     crtc->base.base.id, crtc->base.name))
		goto out;

	drm_dbg_kms(display->drm,
		    "disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
		    pll->info->name, pll->active_mask, pll->on,
		    crtc->base.base.id, crtc->base.name);

	assert_shared_dpll_enabled(display, pll);
	drm_WARN_ON(display->drm, !pll->on);

	/* Only the last active user turns the PLL off in hardware. */
	pll->active_mask &= ~pipe_mask;
	if (pll->active_mask)
		goto out;

	drm_dbg_kms(display->drm, "disabling %s\n", pll->info->name);

	_intel_disable_shared_dpll(display, pll);

out:
	mutex_unlock(&display->dpll.lock);
}
340 
341 static unsigned long
342 intel_dpll_mask_all(struct intel_display *display)
343 {
344 	struct intel_shared_dpll *pll;
345 	unsigned long dpll_mask = 0;
346 	int i;
347 
348 	for_each_shared_dpll(display, pll, i) {
349 		drm_WARN_ON(display->drm, dpll_mask & BIT(pll->info->id));
350 
351 		dpll_mask |= BIT(pll->info->id);
352 	}
353 
354 	return dpll_mask;
355 }
356 
/*
 * Find a PLL from @dpll_mask to use for @crtc: prefer an already
 * referenced PLL whose staged hw state matches @dpll_hw_state exactly
 * (allowing the PLL to be shared), otherwise fall back to the first
 * unused PLL.  Returns NULL if neither is available.
 */
static struct intel_shared_dpll *
intel_find_shared_dpll(struct intel_atomic_state *state,
		       const struct intel_crtc *crtc,
		       const struct intel_dpll_hw_state *dpll_hw_state,
		       unsigned long dpll_mask)
{
	struct intel_display *display = to_intel_display(crtc);
	unsigned long dpll_mask_all = intel_dpll_mask_all(display);
	struct intel_shared_dpll_state *shared_dpll;
	struct intel_shared_dpll *unused_pll = NULL;
	enum intel_dpll_id id;

	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);

	/* The caller must only ask for PLLs that actually exist. */
	drm_WARN_ON(display->drm, dpll_mask & ~dpll_mask_all);

	for_each_set_bit(id, &dpll_mask, fls(dpll_mask_all)) {
		struct intel_shared_dpll *pll;

		pll = intel_get_shared_dpll_by_id(display, id);
		if (!pll)
			continue;

		/* Only want to check enabled timings first */
		if (shared_dpll[pll->index].pipe_mask == 0) {
			/* remember the first free PLL as a fallback */
			if (!unused_pll)
				unused_pll = pll;
			continue;
		}

		/* An exact hw state match means the PLL can be shared. */
		if (memcmp(dpll_hw_state,
			   &shared_dpll[pll->index].hw_state,
			   sizeof(*dpll_hw_state)) == 0) {
			drm_dbg_kms(display->drm,
				    "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
				    crtc->base.base.id, crtc->base.name,
				    pll->info->name,
				    shared_dpll[pll->index].pipe_mask,
				    pll->active_mask);
			return pll;
		}
	}

	/* Ok no matching timings, maybe there's a free one? */
	if (unused_pll) {
		drm_dbg_kms(display->drm, "[CRTC:%d:%s] allocated %s\n",
			    crtc->base.base.id, crtc->base.name,
			    unused_pll->info->name);
		return unused_pll;
	}

	return NULL;
}
410 
/**
 * intel_reference_shared_dpll_crtc - Get a DPLL reference for a CRTC
 * @crtc: CRTC on which behalf the reference is taken
 * @pll: DPLL for which the reference is taken
 * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
 *
 * Take a reference for @pll tracking the use of it by @crtc.
 */
static void
intel_reference_shared_dpll_crtc(const struct intel_crtc *crtc,
				 const struct intel_shared_dpll *pll,
				 struct intel_shared_dpll_state *shared_dpll_state)
{
	struct intel_display *display = to_intel_display(crtc);

	/* Taking a reference twice for the same pipe is a bug. */
	drm_WARN_ON(display->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) != 0);

	shared_dpll_state->pipe_mask |= BIT(crtc->pipe);

	drm_dbg_kms(display->drm, "[CRTC:%d:%s] reserving %s\n",
		    crtc->base.base.id, crtc->base.name, pll->info->name);
}
433 
/*
 * Reserve @pll for @crtc in the atomic state, recording @dpll_hw_state
 * as the PLL's staged hw state if @crtc is its first user.
 */
static void
intel_reference_shared_dpll(struct intel_atomic_state *state,
			    const struct intel_crtc *crtc,
			    const struct intel_shared_dpll *pll,
			    const struct intel_dpll_hw_state *dpll_hw_state)
{
	struct intel_shared_dpll_state *shared_dpll;

	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);

	/* The first user defines the hw state all later sharers must match. */
	if (shared_dpll[pll->index].pipe_mask == 0)
		shared_dpll[pll->index].hw_state = *dpll_hw_state;

	intel_reference_shared_dpll_crtc(crtc, pll, &shared_dpll[pll->index]);
}
449 
/**
 * intel_unreference_shared_dpll_crtc - Drop a DPLL reference for a CRTC
 * @crtc: CRTC on which behalf the reference is dropped
 * @pll: DPLL for which the reference is dropped
 * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
 *
 * Drop a reference for @pll tracking the end of use of it by @crtc.
 */
void
intel_unreference_shared_dpll_crtc(const struct intel_crtc *crtc,
				   const struct intel_shared_dpll *pll,
				   struct intel_shared_dpll_state *shared_dpll_state)
{
	struct intel_display *display = to_intel_display(crtc);

	/* Dropping a reference that was never taken is a bug. */
	drm_WARN_ON(display->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) == 0);

	shared_dpll_state->pipe_mask &= ~BIT(crtc->pipe);

	drm_dbg_kms(display->drm, "[CRTC:%d:%s] releasing %s\n",
		    crtc->base.base.id, crtc->base.name, pll->info->name);
}
472 
/* Drop @crtc's reference on @pll in the atomic state. */
static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
					  const struct intel_crtc *crtc,
					  const struct intel_shared_dpll *pll)
{
	struct intel_shared_dpll_state *shared_dpll;

	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);

	intel_unreference_shared_dpll_crtc(crtc, pll, &shared_dpll[pll->index]);
}
483 
484 static void intel_put_dpll(struct intel_atomic_state *state,
485 			   struct intel_crtc *crtc)
486 {
487 	const struct intel_crtc_state *old_crtc_state =
488 		intel_atomic_get_old_crtc_state(state, crtc);
489 	struct intel_crtc_state *new_crtc_state =
490 		intel_atomic_get_new_crtc_state(state, crtc);
491 
492 	new_crtc_state->shared_dpll = NULL;
493 
494 	if (!old_crtc_state->shared_dpll)
495 		return;
496 
497 	intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
498 }
499 
/**
 * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
 * @state: atomic state
 *
 * This is the dpll version of drm_atomic_helper_swap_state() since the
 * helper does not handle driver-specific global state.
 *
 * For consistency with atomic helpers this function does a complete swap,
 * i.e. it also puts the current state into @state, even though there is no
 * need for that at this moment.
 */
void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
	struct intel_shared_dpll *pll;
	int i;

	/* nothing to do if this atomic state never touched the PLL state */
	if (!state->dpll_set)
		return;

	for_each_shared_dpll(display, pll, i)
		swap(pll->state, shared_dpll[pll->index]);
}
524 
/*
 * Read the current PCH DPLL register values into @dpll_hw_state.
 * Returns true iff the PLL is enabled (VCO enable bit set); returns
 * false without reading if the display core power domain is down.
 */
static bool ibx_pch_dpll_get_hw_state(struct intel_display *display,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *dpll_hw_state)
{
	struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(display,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(display, PCH_DPLL(id));
	hw_state->dpll = val;
	hw_state->fp0 = intel_de_read(display, PCH_FP0(id));
	hw_state->fp1 = intel_de_read(display, PCH_FP1(id));

	intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & DPLL_VCO_ENABLE;
}
548 
549 static void ibx_assert_pch_refclk_enabled(struct intel_display *display)
550 {
551 	u32 val;
552 	bool enabled;
553 
554 	val = intel_de_read(display, PCH_DREF_CONTROL);
555 	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
556 			    DREF_SUPERSPREAD_SOURCE_MASK));
557 	INTEL_DISPLAY_STATE_WARN(display, !enabled,
558 				 "PCH refclk assertion failure, should be active but is disabled\n");
559 }
560 
/*
 * Program and enable a PCH DPLL from @dpll_hw_state.  The register
 * write/delay sequence below is order-sensitive; see the inline
 * comments.
 */
static void ibx_pch_dpll_enable(struct intel_display *display,
				struct intel_shared_dpll *pll,
				const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
	const enum intel_dpll_id id = pll->info->id;

	/* PCH refclock must be enabled first */
	ibx_assert_pch_refclk_enabled(display);

	/* program the dividers before flipping the enable bit */
	intel_de_write(display, PCH_FP0(id), hw_state->fp0);
	intel_de_write(display, PCH_FP1(id), hw_state->fp1);

	intel_de_write(display, PCH_DPLL(id), hw_state->dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(display, PCH_DPLL(id));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(display, PCH_DPLL(id), hw_state->dpll);
	intel_de_posting_read(display, PCH_DPLL(id));
	udelay(200);
}
589 
/* Disable a PCH DPLL by clearing its control register entirely. */
static void ibx_pch_dpll_disable(struct intel_display *display,
				 struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(display, PCH_DPLL(id), 0);
	intel_de_posting_read(display, PCH_DPLL(id));
	/* allow the PLL to wind down */
	udelay(200);
}
599 
/*
 * Nothing to compute at this stage for PCH DPLLs; the PLL state is
 * presumably filled into crtc_state->dpll_hw_state elsewhere — this
 * hook only has to succeed.
 */
static int ibx_compute_dpll(struct intel_atomic_state *state,
			    struct intel_crtc *crtc,
			    struct intel_encoder *encoder)
{
	return 0;
}
606 
/*
 * Reserve a PCH DPLL for @crtc.  Returns 0 on success or -EINVAL when
 * no suitable PLL is available.
 */
static int ibx_get_dpll(struct intel_atomic_state *state,
			struct intel_crtc *crtc,
			struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(state);
	struct drm_i915_private *i915 = to_i915(display->drm);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_shared_dpll *pll;
	enum intel_dpll_id id;

	if (HAS_PCH_IBX(i915)) {
		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
		id = (enum intel_dpll_id) crtc->pipe;
		pll = intel_get_shared_dpll_by_id(display, id);

		drm_dbg_kms(display->drm,
			    "[CRTC:%d:%s] using pre-allocated %s\n",
			    crtc->base.base.id, crtc->base.name,
			    pll->info->name);
	} else {
		/* otherwise the two PCH PLLs form a shared pool */
		pll = intel_find_shared_dpll(state, crtc,
					     &crtc_state->dpll_hw_state,
					     BIT(DPLL_ID_PCH_PLL_B) |
					     BIT(DPLL_ID_PCH_PLL_A));
	}

	if (!pll)
		return -EINVAL;

	/* reference the pll */
	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return 0;
}
645 
/* Print the IBX PLL hw state to @p (debug output). */
static void ibx_dump_hw_state(struct drm_printer *p,
			      const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;

	drm_printf(p, "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
		   "fp0: 0x%x, fp1: 0x%x\n",
		   hw_state->dpll,
		   hw_state->dpll_md,
		   hw_state->fp0,
		   hw_state->fp1);
}
658 
659 static bool ibx_compare_hw_state(const struct intel_dpll_hw_state *_a,
660 				 const struct intel_dpll_hw_state *_b)
661 {
662 	const struct i9xx_dpll_hw_state *a = &_a->i9xx;
663 	const struct i9xx_dpll_hw_state *b = &_b->i9xx;
664 
665 	return a->dpll == b->dpll &&
666 		a->dpll_md == b->dpll_md &&
667 		a->fp0 == b->fp0 &&
668 		a->fp1 == b->fp1;
669 }
670 
/* PCH (IBX/CPT) DPLL hooks; no get_freq implementation */
static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
	.enable = ibx_pch_dpll_enable,
	.disable = ibx_pch_dpll_disable,
	.get_hw_state = ibx_pch_dpll_get_hw_state,
};
676 
/* the two PCH DPLLs; table is terminated by the empty entry */
static const struct dpll_info pch_plls[] = {
	{ .name = "PCH DPLL A", .funcs = &ibx_pch_dpll_funcs, .id = DPLL_ID_PCH_PLL_A, },
	{ .name = "PCH DPLL B", .funcs = &ibx_pch_dpll_funcs, .id = DPLL_ID_PCH_PLL_B, },
	{}
};
682 
/* DPLL manager for PCH platforms (ILK+ with PCH DPLLs) */
static const struct intel_dpll_mgr pch_pll_mgr = {
	.dpll_info = pch_plls,
	.compute_dplls = ibx_compute_dpll,
	.get_dplls = ibx_get_dpll,
	.put_dplls = intel_put_dpll,
	.dump_hw_state = ibx_dump_hw_state,
	.compare_hw_state = ibx_compare_hw_state,
};
691 
/*
 * Enable a HSW/BDW WRPLL by writing the full WRPLL_CTL value (which
 * includes the enable bit) from @dpll_hw_state.
 */
static void hsw_ddi_wrpll_enable(struct intel_display *display,
				 struct intel_shared_dpll *pll,
				 const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(display, WRPLL_CTL(id), hw_state->wrpll);
	intel_de_posting_read(display, WRPLL_CTL(id));
	/* brief wait for the PLL to settle */
	udelay(20);
}
703 
/* Enable the HSW/BDW SPLL by writing its control value from @dpll_hw_state. */
static void hsw_ddi_spll_enable(struct intel_display *display,
				struct intel_shared_dpll *pll,
				const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;

	intel_de_write(display, SPLL_CTL, hw_state->spll);
	intel_de_posting_read(display, SPLL_CTL);
	/* brief wait for the PLL to settle */
	udelay(20);
}
714 
/* Disable a WRPLL by clearing only its enable bit (dividers are kept). */
static void hsw_ddi_wrpll_disable(struct intel_display *display,
				  struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_rmw(display, WRPLL_CTL(id), WRPLL_PLL_ENABLE, 0);
	intel_de_posting_read(display, WRPLL_CTL(id));

	/*
	 * Try to set up the PCH reference clock once all DPLLs
	 * that depend on it have been shut down.
	 */
	if (display->dpll.pch_ssc_use & BIT(id))
		intel_init_pch_refclk(display);
}
730 
731 static void hsw_ddi_spll_disable(struct intel_display *display,
732 				 struct intel_shared_dpll *pll)
733 {
734 	enum intel_dpll_id id = pll->info->id;
735 
736 	intel_de_rmw(display, SPLL_CTL, SPLL_PLL_ENABLE, 0);
737 	intel_de_posting_read(display, SPLL_CTL);
738 
739 	/*
740 	 * Try to set up the PCH reference clock once all DPLLs
741 	 * that depend on it have been shut down.
742 	 */
743 	if (display->dpll.pch_ssc_use & BIT(id))
744 		intel_init_pch_refclk(display);
745 }
746 
/*
 * Read out the WRPLL_CTL value into @dpll_hw_state.  Returns true iff
 * the PLL is enabled; returns false without reading if the display core
 * power domain is down.
 */
static bool hsw_ddi_wrpll_get_hw_state(struct intel_display *display,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *dpll_hw_state)
{
	struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(display,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(display, WRPLL_CTL(id));
	hw_state->wrpll = val;

	intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & WRPLL_PLL_ENABLE;
}
768 
/*
 * Read out the SPLL_CTL value into @dpll_hw_state.  Returns true iff
 * the PLL is enabled; returns false without reading if the display core
 * power domain is down.
 */
static bool hsw_ddi_spll_get_hw_state(struct intel_display *display,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *dpll_hw_state)
{
	struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
	intel_wakeref_t wakeref;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(display,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(display, SPLL_CTL);
	hw_state->spll = val;

	intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & SPLL_PLL_ENABLE;
}
789 
/* LC PLL reference frequency, in MHz */
#define LC_FREQ 2700
/* LC_FREQ scaled to the units used for freq2k in hsw_wrpll_update_rnp() */
#define LC_FREQ_2K U64_C(LC_FREQ * 2000)

/* WRPLL post divider search range; even values only (P_INC of 2) */
#define P_MIN 2
#define P_MAX 64
#define P_INC 2

/* Constraints for PLL good behavior */
#define REF_MIN 48
#define REF_MAX 400
#define VCO_MIN 2400
#define VCO_MAX 4800

/* one WRPLL divider candidate; n2 and r2 are the doubled N and R values */
struct hsw_wrpll_rnp {
	unsigned p, n2, r2;
};
806 
/*
 * Return the PPM error budget allowed when choosing WRPLL dividers for
 * the given pixel clock (in Hz).  Clocks not listed in the table get
 * the default budget of 1000 ppm.
 */
static unsigned hsw_wrpll_get_budget_for_freq(int clock)
{
	static const struct {
		int clock;
		unsigned budget;
	} budgets[] = {
		{  25175000,    0 }, {  25200000,    0 }, {  27000000,    0 },
		{  27027000,    0 }, {  37762500,    0 }, {  37800000,    0 },
		{  40500000,    0 }, {  40541000,    0 }, {  54000000,    0 },
		{  54054000,    0 }, {  59341000,    0 }, {  59400000,    0 },
		{  72000000,    0 }, {  74176000,    0 }, {  74250000,    0 },
		{  81000000,    0 }, {  81081000,    0 }, {  89012000,    0 },
		{  89100000,    0 }, { 108000000,    0 }, { 108108000,    0 },
		{ 111264000,    0 }, { 111375000,    0 }, { 148352000,    0 },
		{ 148500000,    0 }, { 162000000,    0 }, { 162162000,    0 },
		{ 222525000,    0 }, { 222750000,    0 }, { 296703000,    0 },
		{ 297000000,    0 },
		{ 233500000, 1500 }, { 245250000, 1500 }, { 247750000, 1500 },
		{ 253250000, 1500 }, { 298000000, 1500 },
		{ 169128000, 2000 }, { 169500000, 2000 }, { 179500000, 2000 },
		{ 202000000, 2000 },
		{ 256250000, 4000 }, { 262500000, 4000 }, { 270000000, 4000 },
		{ 272500000, 4000 }, { 273750000, 4000 }, { 280750000, 4000 },
		{ 281250000, 4000 }, { 286000000, 4000 }, { 291750000, 4000 },
		{ 267250000, 5000 }, { 268500000, 5000 },
	};
	unsigned i;

	for (i = 0; i < sizeof(budgets) / sizeof(budgets[0]); i++) {
		if (budgets[i].clock == clock)
			return budgets[i].budget;
	}

	return 1000;
}
870 
/*
 * Consider divider candidate (r2, n2, p) for the target output
 * frequency @freq2k against the current @best, updating @best when the
 * candidate is preferable under the PPM error @budget.
 */
static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
				 unsigned int r2, unsigned int n2,
				 unsigned int p,
				 struct hsw_wrpll_rnp *best)
{
	u64 a, b, c, d, diff, diff_best;

	/* No best (r,n,p) yet */
	if (best->p == 0) {
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
		return;
	}

	/*
	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
	 * freq2k.
	 *
	 * delta = 1e6 *
	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
	 *	   freq2k;
	 *
	 * and we would like delta <= budget.
	 *
	 * If the discrepancy is above the PPM-based budget, always prefer to
	 * improve upon the previous solution.  However, if you're within the
	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
	 */
	a = freq2k * budget * p * r2;
	b = freq2k * budget * best->p * best->r2;
	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
	diff_best = abs_diff(freq2k * best->p * best->r2,
			     LC_FREQ_2K * best->n2);
	c = 1000000 * diff;
	d = 1000000 * diff_best;

	/*
	 * After clearing denominators, "delta <= budget" is equivalent to
	 * c <= a for the candidate, and d <= b for the current best.
	 */
	if (a < c && b < d) {
		/* If both are above the budget, pick the closer */
		if (best->p * best->r2 * diff < p * r2 * diff_best) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	} else if (a >= c && b < d) {
		/* If A is below the threshold but B is above it?  Update. */
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
	} else if (a >= c && b >= d) {
		/* Both are below the limit, so pick the higher n2/(r2*r2) */
		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	}
	/* Otherwise a < c && b >= d, do nothing */
}
930 
931 static void
932 hsw_ddi_calculate_wrpll(int clock /* in Hz */,
933 			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
934 {
935 	u64 freq2k;
936 	unsigned p, n2, r2;
937 	struct hsw_wrpll_rnp best = {};
938 	unsigned budget;
939 
940 	freq2k = clock / 100;
941 
942 	budget = hsw_wrpll_get_budget_for_freq(clock);
943 
944 	/* Special case handling for 540 pixel clock: bypass WR PLL entirely
945 	 * and directly pass the LC PLL to it. */
946 	if (freq2k == 5400000) {
947 		*n2_out = 2;
948 		*p_out = 1;
949 		*r2_out = 2;
950 		return;
951 	}
952 
953 	/*
954 	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
955 	 * the WR PLL.
956 	 *
957 	 * We want R so that REF_MIN <= Ref <= REF_MAX.
958 	 * Injecting R2 = 2 * R gives:
959 	 *   REF_MAX * r2 > LC_FREQ * 2 and
960 	 *   REF_MIN * r2 < LC_FREQ * 2
961 	 *
962 	 * Which means the desired boundaries for r2 are:
963 	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
964 	 *
965 	 */
966 	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
967 	     r2 <= LC_FREQ * 2 / REF_MIN;
968 	     r2++) {
969 
970 		/*
971 		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
972 		 *
973 		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
974 		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
975 		 *   VCO_MAX * r2 > n2 * LC_FREQ and
976 		 *   VCO_MIN * r2 < n2 * LC_FREQ)
977 		 *
978 		 * Which means the desired boundaries for n2 are:
979 		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
980 		 */
981 		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
982 		     n2 <= VCO_MAX * r2 / LC_FREQ;
983 		     n2++) {
984 
985 			for (p = P_MIN; p <= P_MAX; p += P_INC)
986 				hsw_wrpll_update_rnp(freq2k, budget,
987 						     r2, n2, p, &best);
988 		}
989 	}
990 
991 	*n2_out = best.n2;
992 	*p_out = best.p;
993 	*r2_out = best.r2;
994 }
995 
/*
 * Calculate the WRPLL output frequency, in kHz, from the programmed
 * WRPLL_CTL value in @dpll_hw_state.  Returns 0 on an unknown reference
 * clock select.
 */
static int hsw_ddi_wrpll_get_freq(struct intel_display *display,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
	int refclk;
	int n, p, r;
	u32 wrpll = hw_state->wrpll;

	/* determine which reference clock feeds the WRPLL */
	switch (wrpll & WRPLL_REF_MASK) {
	case WRPLL_REF_SPECIAL_HSW:
		/* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
		if (display->platform.haswell && !display->platform.haswell_ult) {
			refclk = display->dpll.ref_clks.nssc;
			break;
		}
		fallthrough;
	case WRPLL_REF_PCH_SSC:
		/*
		 * We could calculate spread here, but our checking
		 * code only cares about 5% accuracy, and spread is a max of
		 * 0.5% downspread.
		 */
		refclk = display->dpll.ref_clks.ssc;
		break;
	case WRPLL_REF_LCPLL:
		/* LC PLL: 2.7 GHz, expressed in kHz */
		refclk = 2700000;
		break;
	default:
		MISSING_CASE(wrpll);
		return 0;
	}

	/* extract the raw divider fields */
	r = wrpll & WRPLL_DIVIDER_REF_MASK;
	p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
	n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;

	/* Convert to KHz, p & r have a fixed point portion */
	return (refclk * n / 10) / (p * r) * 2;
}
1036 
/*
 * Compute the WRPLL_CTL value for an HDMI output and store it in the
 * crtc state.  port_clock is then replaced with the rate decoded back
 * from the chosen dividers, i.e. the exact rate the PLL will produce.
 */
static int
hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state,
			   struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct hsw_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.hsw;
	unsigned int p, n2, r2;

	/* port_clock is in kHz, the divider search wants Hz */
	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);

	hw_state->wrpll =
		WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
		WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
		WRPLL_DIVIDER_POST(p);

	crtc_state->port_clock = hsw_ddi_wrpll_get_freq(display, NULL,
							&crtc_state->dpll_hw_state);

	return 0;
}
1059 
1060 static struct intel_shared_dpll *
1061 hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
1062 		       struct intel_crtc *crtc)
1063 {
1064 	struct intel_crtc_state *crtc_state =
1065 		intel_atomic_get_new_crtc_state(state, crtc);
1066 
1067 	return intel_find_shared_dpll(state, crtc,
1068 				      &crtc_state->dpll_hw_state,
1069 				      BIT(DPLL_ID_WRPLL2) |
1070 				      BIT(DPLL_ID_WRPLL1));
1071 }
1072 
1073 static int
1074 hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state)
1075 {
1076 	struct intel_display *display = to_intel_display(crtc_state);
1077 	int clock = crtc_state->port_clock;
1078 
1079 	switch (clock / 2) {
1080 	case 81000:
1081 	case 135000:
1082 	case 270000:
1083 		return 0;
1084 	default:
1085 		drm_dbg_kms(display->drm, "Invalid clock for DP: %d\n",
1086 			    clock);
1087 		return -EINVAL;
1088 	}
1089 }
1090 
1091 static struct intel_shared_dpll *
1092 hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
1093 {
1094 	struct intel_display *display = to_intel_display(crtc_state);
1095 	struct intel_shared_dpll *pll;
1096 	enum intel_dpll_id pll_id;
1097 	int clock = crtc_state->port_clock;
1098 
1099 	switch (clock / 2) {
1100 	case 81000:
1101 		pll_id = DPLL_ID_LCPLL_810;
1102 		break;
1103 	case 135000:
1104 		pll_id = DPLL_ID_LCPLL_1350;
1105 		break;
1106 	case 270000:
1107 		pll_id = DPLL_ID_LCPLL_2700;
1108 		break;
1109 	default:
1110 		MISSING_CASE(clock / 2);
1111 		return NULL;
1112 	}
1113 
1114 	pll = intel_get_shared_dpll_by_id(display, pll_id);
1115 
1116 	if (!pll)
1117 		return NULL;
1118 
1119 	return pll;
1120 }
1121 
1122 static int hsw_ddi_lcpll_get_freq(struct intel_display *display,
1123 				  const struct intel_shared_dpll *pll,
1124 				  const struct intel_dpll_hw_state *dpll_hw_state)
1125 {
1126 	int link_clock = 0;
1127 
1128 	switch (pll->info->id) {
1129 	case DPLL_ID_LCPLL_810:
1130 		link_clock = 81000;
1131 		break;
1132 	case DPLL_ID_LCPLL_1350:
1133 		link_clock = 135000;
1134 		break;
1135 	case DPLL_ID_LCPLL_2700:
1136 		link_clock = 270000;
1137 		break;
1138 	default:
1139 		drm_WARN(display->drm, 1, "bad port clock sel\n");
1140 		break;
1141 	}
1142 
1143 	return link_clock * 2;
1144 }
1145 
/*
 * Compute the SPLL_CTL value for the analog (CRT) output.  Only the
 * 135000 kHz link rate (SPLL_FREQ_1350MHz) is programmed here; any
 * other port clock is rejected.
 */
static int
hsw_ddi_spll_compute_dpll(struct intel_atomic_state *state,
			  struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct hsw_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.hsw;

	if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
		return -EINVAL;

	hw_state->spll =
		SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;

	return 0;
}
1162 
1163 static struct intel_shared_dpll *
1164 hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
1165 		      struct intel_crtc *crtc)
1166 {
1167 	struct intel_crtc_state *crtc_state =
1168 		intel_atomic_get_new_crtc_state(state, crtc);
1169 
1170 	return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
1171 				      BIT(DPLL_ID_SPLL));
1172 }
1173 
1174 static int hsw_ddi_spll_get_freq(struct intel_display *display,
1175 				 const struct intel_shared_dpll *pll,
1176 				 const struct intel_dpll_hw_state *dpll_hw_state)
1177 {
1178 	const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
1179 	int link_clock = 0;
1180 
1181 	switch (hw_state->spll & SPLL_FREQ_MASK) {
1182 	case SPLL_FREQ_810MHz:
1183 		link_clock = 81000;
1184 		break;
1185 	case SPLL_FREQ_1350MHz:
1186 		link_clock = 135000;
1187 		break;
1188 	case SPLL_FREQ_2700MHz:
1189 		link_clock = 270000;
1190 		break;
1191 	default:
1192 		drm_WARN(display->drm, 1, "bad spll freq\n");
1193 		break;
1194 	}
1195 
1196 	return link_clock * 2;
1197 }
1198 
1199 static int hsw_compute_dpll(struct intel_atomic_state *state,
1200 			    struct intel_crtc *crtc,
1201 			    struct intel_encoder *encoder)
1202 {
1203 	struct intel_crtc_state *crtc_state =
1204 		intel_atomic_get_new_crtc_state(state, crtc);
1205 
1206 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1207 		return hsw_ddi_wrpll_compute_dpll(state, crtc);
1208 	else if (intel_crtc_has_dp_encoder(crtc_state))
1209 		return hsw_ddi_lcpll_compute_dpll(crtc_state);
1210 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1211 		return hsw_ddi_spll_compute_dpll(state, crtc);
1212 	else
1213 		return -EINVAL;
1214 }
1215 
1216 static int hsw_get_dpll(struct intel_atomic_state *state,
1217 			struct intel_crtc *crtc,
1218 			struct intel_encoder *encoder)
1219 {
1220 	struct intel_crtc_state *crtc_state =
1221 		intel_atomic_get_new_crtc_state(state, crtc);
1222 	struct intel_shared_dpll *pll = NULL;
1223 
1224 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1225 		pll = hsw_ddi_wrpll_get_dpll(state, crtc);
1226 	else if (intel_crtc_has_dp_encoder(crtc_state))
1227 		pll = hsw_ddi_lcpll_get_dpll(crtc_state);
1228 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1229 		pll = hsw_ddi_spll_get_dpll(state, crtc);
1230 
1231 	if (!pll)
1232 		return -EINVAL;
1233 
1234 	intel_reference_shared_dpll(state, crtc,
1235 				    pll, &crtc_state->dpll_hw_state);
1236 
1237 	crtc_state->shared_dpll = pll;
1238 
1239 	return 0;
1240 }
1241 
1242 static void hsw_update_dpll_ref_clks(struct intel_display *display)
1243 {
1244 	display->dpll.ref_clks.ssc = 135000;
1245 	/* Non-SSC is only used on non-ULT HSW. */
1246 	if (intel_de_read(display, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
1247 		display->dpll.ref_clks.nssc = 24000;
1248 	else
1249 		display->dpll.ref_clks.nssc = 135000;
1250 }
1251 
/* Print the raw HSW PLL register values for debugging. */
static void hsw_dump_hw_state(struct drm_printer *p,
			      const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;

	drm_printf(p, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
		   hw_state->wrpll, hw_state->spll);
}
1260 
1261 static bool hsw_compare_hw_state(const struct intel_dpll_hw_state *_a,
1262 				 const struct intel_dpll_hw_state *_b)
1263 {
1264 	const struct hsw_dpll_hw_state *a = &_a->hsw;
1265 	const struct hsw_dpll_hw_state *b = &_b->hsw;
1266 
1267 	return a->wrpll == b->wrpll &&
1268 		a->spll == b->spll;
1269 }
1270 
/* WRPLL 1/2: programmable PLLs, used here for HDMI outputs. */
static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
	.enable = hsw_ddi_wrpll_enable,
	.disable = hsw_ddi_wrpll_disable,
	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
	.get_freq = hsw_ddi_wrpll_get_freq,
};

/* SPLL: used here for the analog (CRT) output. */
static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
	.enable = hsw_ddi_spll_enable,
	.disable = hsw_ddi_spll_disable,
	.get_hw_state = hsw_ddi_spll_get_hw_state,
	.get_freq = hsw_ddi_spll_get_freq,
};

/*
 * The LCPLLs are marked .always_on in hsw_plls[] below, so the
 * enable/disable hooks are intentionally empty and readout always
 * reports the PLL as enabled.
 */
static void hsw_ddi_lcpll_enable(struct intel_display *display,
				 struct intel_shared_dpll *pll,
				 const struct intel_dpll_hw_state *hw_state)
{
}

static void hsw_ddi_lcpll_disable(struct intel_display *display,
				  struct intel_shared_dpll *pll)
{
}

static bool hsw_ddi_lcpll_get_hw_state(struct intel_display *display,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *dpll_hw_state)
{
	/* no state to read out; the LCPLL is always considered enabled */
	return true;
}

static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
	.enable = hsw_ddi_lcpll_enable,
	.disable = hsw_ddi_lcpll_disable,
	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
	.get_freq = hsw_ddi_lcpll_get_freq,
};

/* All shared DPLLs available on HSW/BDW. */
static const struct dpll_info hsw_plls[] = {
	{ .name = "WRPLL 1", .funcs = &hsw_ddi_wrpll_funcs, .id = DPLL_ID_WRPLL1, },
	{ .name = "WRPLL 2", .funcs = &hsw_ddi_wrpll_funcs, .id = DPLL_ID_WRPLL2, },
	{ .name = "SPLL", .funcs = &hsw_ddi_spll_funcs, .id = DPLL_ID_SPLL, },
	{ .name = "LCPLL 810", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_810,
	  .always_on = true, },
	{ .name = "LCPLL 1350", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_1350,
	  .always_on = true, },
	{ .name = "LCPLL 2700", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_2700,
	  .always_on = true, },
	{}
};

/* HSW/BDW shared DPLL manager. */
static const struct intel_dpll_mgr hsw_pll_mgr = {
	.dpll_info = hsw_plls,
	.compute_dplls = hsw_compute_dpll,
	.get_dplls = hsw_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = hsw_update_dpll_ref_clks,
	.dump_hw_state = hsw_dump_hw_state,
	.compare_hw_state = hsw_compare_hw_state,
};
1332 
/* Control and HDMI-mode config registers for one SKL shared DPLL. */
struct skl_dpll_regs {
	i915_reg_t ctl, cfgcr1, cfgcr2;
};

/* this array is indexed by the *shared* pll id */
static const struct skl_dpll_regs skl_dpll_regs[4] = {
	{
		/* DPLL 0 */
		.ctl = LCPLL1_CTL,
		/* DPLL 0 doesn't support HDMI mode */
	},
	{
		/* DPLL 1 */
		.ctl = LCPLL2_CTL,
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
	},
	{
		/* DPLL 2 */
		.ctl = WRPLL_CTL(0),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
	},
	{
		/* DPLL 3 */
		.ctl = WRPLL_CTL(1),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
	},
};
1363 
/*
 * Update this PLL's fields in the shared DPLL_CTRL1 register.
 * hw_state->ctrl1 is kept in "PLL 0" position and shifted into place
 * here; each PLL owns a 6-bit field starting at bit id*6.
 */
static void skl_ddi_pll_write_ctrl1(struct intel_display *display,
				    struct intel_shared_dpll *pll,
				    const struct skl_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_rmw(display, DPLL_CTRL1,
		     DPLL_CTRL1_HDMI_MODE(id) |
		     DPLL_CTRL1_SSC(id) |
		     DPLL_CTRL1_LINK_RATE_MASK(id),
		     hw_state->ctrl1 << (id * 6));
	intel_de_posting_read(display, DPLL_CTRL1);
}
1377 
/*
 * Program and enable a SKL DPLL (1-3): first the DPLL_CTRL1 fields,
 * then the CFGCR1/2 dividers, then the enable bit, and finally wait
 * (up to 5 ms) for the lock indication.
 */
static void skl_ddi_pll_enable(struct intel_display *display,
			       struct intel_shared_dpll *pll,
			       const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	skl_ddi_pll_write_ctrl1(display, pll, hw_state);

	intel_de_write(display, regs[id].cfgcr1, hw_state->cfgcr1);
	intel_de_write(display, regs[id].cfgcr2, hw_state->cfgcr2);
	intel_de_posting_read(display, regs[id].cfgcr1);
	intel_de_posting_read(display, regs[id].cfgcr2);

	/* the enable bit is always bit 31 */
	intel_de_rmw(display, regs[id].ctl, 0, LCPLL_PLL_ENABLE);

	if (intel_de_wait_for_set(display, DPLL_STATUS, DPLL_LOCK(id), 5))
		drm_err(display->drm, "DPLL %d not locked\n", id);
}
1399 
/*
 * DPLL0 also drives CDCLK (see skl_ddi_dpll0_get_hw_state()), so
 * "enabling" it only updates its DPLL_CTRL1 fields; the PLL itself is
 * left untouched.
 */
static void skl_ddi_dpll0_enable(struct intel_display *display,
				 struct intel_shared_dpll *pll,
				 const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;

	skl_ddi_pll_write_ctrl1(display, pll, hw_state);
}

/* Disable a SKL DPLL (1-3) by clearing its enable bit. */
static void skl_ddi_pll_disable(struct intel_display *display,
				struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	/* the enable bit is always bit 31 */
	intel_de_rmw(display, regs[id].ctl, LCPLL_PLL_ENABLE, 0);
	intel_de_posting_read(display, regs[id].ctl);
}

/* DPLL0 is never disabled from here since it drives CDCLK. */
static void skl_ddi_dpll0_disable(struct intel_display *display,
				  struct intel_shared_dpll *pll)
{
}
1424 
/*
 * Read out the current hardware state of a SKL DPLL (1-3).  Returns
 * false if the display core power domain can't be acquired or the PLL
 * is disabled; true with @dpll_hw_state filled in otherwise.
 */
static bool skl_ddi_pll_get_hw_state(struct intel_display *display,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *dpll_hw_state)
{
	struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(display,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(display, regs[id].ctl);
	if (!(val & LCPLL_PLL_ENABLE))
		goto out;

	/* each PLL owns a 6-bit field in DPLL_CTRL1, at bit id*6 */
	val = intel_de_read(display, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	/* avoid reading back stale values if HDMI mode is not enabled */
	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
		hw_state->cfgcr1 = intel_de_read(display, regs[id].cfgcr1);
		hw_state->cfgcr2 = intel_de_read(display, regs[id].cfgcr2);
	}
	ret = true;

out:
	intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1462 
/*
 * Read out DPLL0's state.  Unlike DPLL1-3, DPLL0 is expected to always
 * be enabled (it drives CDCLK), so a disabled PLL triggers a WARN and
 * no CFGCR registers are read (DPLL0 has none).
 */
static bool skl_ddi_dpll0_get_hw_state(struct intel_display *display,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *dpll_hw_state)
{
	struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(display,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	/* DPLL0 is always enabled since it drives CDCLK */
	val = intel_de_read(display, regs[id].ctl);
	if (drm_WARN_ON(display->drm, !(val & LCPLL_PLL_ENABLE)))
		goto out;

	/* each PLL owns a 6-bit field in DPLL_CTRL1, at bit id*6 */
	val = intel_de_read(display, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	ret = true;

out:
	intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1496 
/* Running best candidate for the SKL WRPLL divider search. */
struct skl_wrpll_context {
	u64 min_deviation;		/* current minimal deviation */
	u64 central_freq;		/* chosen central freq */
	u64 dco_freq;			/* chosen dco freq */
	unsigned int p;			/* chosen divider */
};

/* DCO freq must be within +1%/-6%  of the DCO central freq */
#define SKL_DCO_MAX_PDEVIATION	100	/* +1%, in units of 0.01% */
#define SKL_DCO_MAX_NDEVIATION	600	/* -6%, in units of 0.01% */
1507 
1508 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1509 				  u64 central_freq,
1510 				  u64 dco_freq,
1511 				  unsigned int divider)
1512 {
1513 	u64 deviation;
1514 
1515 	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1516 			      central_freq);
1517 
1518 	/* positive deviation */
1519 	if (dco_freq >= central_freq) {
1520 		if (deviation < SKL_DCO_MAX_PDEVIATION &&
1521 		    deviation < ctx->min_deviation) {
1522 			ctx->min_deviation = deviation;
1523 			ctx->central_freq = central_freq;
1524 			ctx->dco_freq = dco_freq;
1525 			ctx->p = divider;
1526 		}
1527 	/* negative deviation */
1528 	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1529 		   deviation < ctx->min_deviation) {
1530 		ctx->min_deviation = deviation;
1531 		ctx->central_freq = central_freq;
1532 		ctx->dco_freq = dco_freq;
1533 		ctx->p = divider;
1534 	}
1535 }
1536 
/*
 * Decompose the overall divider @p into the three cascaded dividers
 * p0 (P), p1 (Q) and p2 (K) such that p = p0 * p1 * p2.  Outputs are
 * left untouched for dividers that cannot be decomposed.
 */
static void skl_wrpll_get_multipliers(unsigned int p,
				      unsigned int *p0 /* out */,
				      unsigned int *p1 /* out */,
				      unsigned int *p2 /* out */)
{
	if (p % 2 == 0) {
		/* even dividers */
		unsigned int half = p / 2;

		if (half == 1 || half == 2 || half == 3 || half == 5) {
			*p0 = 2;
			*p1 = 1;
			*p2 = half;
		} else if (half % 2 == 0) {
			*p0 = 2;
			*p1 = half / 2;
			*p2 = 2;
		} else if (half % 3 == 0) {
			*p0 = 3;
			*p1 = half / 3;
			*p2 = 2;
		} else if (half % 7 == 0) {
			*p0 = 7;
			*p1 = half / 7;
			*p2 = 2;
		}
		return;
	}

	/* odd dividers: 3, 5, 7, 9, 15, 21, 35 */
	switch (p) {
	case 3:
	case 9:
		*p0 = 3;
		*p1 = 1;
		*p2 = p / 3;
		break;
	case 5:
	case 7:
		*p0 = p;
		*p1 = 1;
		*p2 = 1;
		break;
	case 15:
		*p0 = 3;
		*p1 = 1;
		*p2 = 5;
		break;
	case 21:
		*p0 = 7;
		*p1 = 1;
		*p2 = 3;
		break;
	case 35:
		*p0 = 7;
		*p1 = 1;
		*p2 = 5;
		break;
	}
}
1585 
/* WRPLL parameters in DPLL_CFGCR1/2 register encoding. */
struct skl_wrpll_params {
	u32 dco_fraction;	/* 15-bit fractional part of the DCO ratio */
	u32 dco_integer;	/* integer part of the DCO ratio */
	u32 qdiv_ratio;		/* Q divider ratio (p1) */
	u32 qdiv_mode;		/* 1 = Q divider active, 0 = bypassed */
	u32 kdiv;		/* encoded K divider (p2) */
	u32 pdiv;		/* encoded P divider (p0) */
	u32 central_freq;	/* encoded DCO central frequency select */
};
1595 
/*
 * Translate the chosen DCO central frequency and raw p0/p1/p2 dividers
 * into their encoded register fields, and derive the DCO integer and
 * 15-bit fractional ratio from the AFE clock.
 *
 * @afe_clock and @central_freq are in Hz, @ref_clock in kHz.
 */
static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
				      u64 afe_clock,
				      int ref_clock,
				      u64 central_freq,
				      u32 p0, u32 p1, u32 p2)
{
	u64 dco_freq;

	switch (central_freq) {
	case 9600000000ULL:
		params->central_freq = 0;
		break;
	case 9000000000ULL:
		params->central_freq = 1;
		break;
	case 8400000000ULL:
		params->central_freq = 3;
	}

	switch (p0) {
	case 1:
		params->pdiv = 0;
		break;
	case 2:
		params->pdiv = 1;
		break;
	case 3:
		params->pdiv = 2;
		break;
	case 7:
		params->pdiv = 4;
		break;
	default:
		WARN(1, "Incorrect PDiv\n");
	}

	switch (p2) {
	case 5:
		params->kdiv = 0;
		break;
	case 2:
		params->kdiv = 1;
		break;
	case 3:
		params->kdiv = 2;
		break;
	case 1:
		params->kdiv = 3;
		break;
	default:
		WARN(1, "Incorrect KDiv\n");
	}

	/* Q divider is only engaged for ratios other than 1 */
	params->qdiv_ratio = p1;
	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;

	dco_freq = p0 * p1 * p2 * afe_clock;

	/*
	 * Intermediate values are in Hz.
	 * Divide by MHz to match bsepc
	 */
	params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
	params->dco_fraction =
		div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
}
1663 
/*
 * Choose WRPLL parameters for the given port clock (kHz): try every
 * even divider (preferred) and then every odd one, against the three
 * possible DCO central frequencies, keeping the candidate with the
 * smallest DCO deviation.  Returns -EINVAL when no divider keeps the
 * DCO within the allowed band.
 */
static int
skl_ddi_calculate_wrpll(int clock,
			int ref_clock,
			struct skl_wrpll_params *wrpll_params)
{
	static const u64 dco_central_freq[3] = { 8400000000ULL,
						 9000000000ULL,
						 9600000000ULL };
	static const u8 even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
					    24, 28, 30, 32, 36, 40, 42, 44,
					    48, 52, 54, 56, 60, 64, 66, 68,
					    70, 72, 76, 78, 80, 84, 88, 90,
					    92, 96, 98 };
	static const u8 odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
	static const struct {
		const u8 *list;
		int n_dividers;
	} dividers[] = {
		{ even_dividers, ARRAY_SIZE(even_dividers) },
		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
	};
	struct skl_wrpll_context ctx = {
		.min_deviation = U64_MAX,
	};
	unsigned int dco, d, i;
	unsigned int p0, p1, p2;
	u64 afe_clock = (u64)clock * 1000 * 5; /* AFE Clock is 5x Pixel clock, in Hz */

	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
			for (i = 0; i < dividers[d].n_dividers; i++) {
				unsigned int p = dividers[d].list[i];
				u64 dco_freq = p * afe_clock;

				skl_wrpll_try_divider(&ctx,
						      dco_central_freq[dco],
						      dco_freq,
						      p);
				/*
				 * Skip the remaining dividers if we're sure to
				 * have found the definitive divider, we can't
				 * improve a 0 deviation.
				 */
				if (ctx.min_deviation == 0)
					goto skip_remaining_dividers;
			}
		}

skip_remaining_dividers:
		/*
		 * If a solution is found with an even divider, prefer
		 * this one.
		 */
		if (d == 0 && ctx.p)
			break;
	}

	if (!ctx.p)
		return -EINVAL;

	/*
	 * gcc incorrectly analyses that these can be used without being
	 * initialized. To be fair, it's hard to guess.
	 */
	p0 = p1 = p2 = 0;
	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
	skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
				  ctx.central_freq, p0, p1, p2);

	return 0;
}
1735 
/*
 * Decode DPLL_CFGCR1/2 back into the resulting port clock in kHz.
 * Returns 0 for divider encodings that cannot be decoded.
 */
static int skl_ddi_wrpll_get_freq(struct intel_display *display,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
	int ref_clock = display->dpll.ref_clks.nssc;
	u32 p0, p1, p2, dco_freq;

	p0 = hw_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
	p2 = hw_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;

	/* Q divider ratio only applies when Q divider mode is enabled */
	if (hw_state->cfgcr2 &  DPLL_CFGCR2_QDIV_MODE(1))
		p1 = (hw_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
	else
		p1 = 1;


	switch (p0) {
	case DPLL_CFGCR2_PDIV_1:
		p0 = 1;
		break;
	case DPLL_CFGCR2_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR2_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR2_PDIV_7_INVALID:
		/*
		 * Incorrect ASUS-Z170M BIOS setting, the HW seems to ignore bit#0,
		 * handling it the same way as PDIV_7.
		 */
		drm_dbg_kms(display->drm, "Invalid WRPLL PDIV divider value, fixing it.\n");
		fallthrough;
	case DPLL_CFGCR2_PDIV_7:
		p0 = 7;
		break;
	default:
		MISSING_CASE(p0);
		return 0;
	}

	switch (p2) {
	case DPLL_CFGCR2_KDIV_5:
		p2 = 5;
		break;
	case DPLL_CFGCR2_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR2_KDIV_3:
		p2 = 3;
		break;
	case DPLL_CFGCR2_KDIV_1:
		p2 = 1;
		break;
	default:
		MISSING_CASE(p2);
		return 0;
	}

	/* DCO frequency = (integer + fraction/2^15) * ref clock */
	dco_freq = (hw_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
		   ref_clock;

	dco_freq += ((hw_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
		    ref_clock / 0x8000;

	if (drm_WARN_ON(display->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	/* AFE clock is 5x the pixel clock; undo the 5x and the dividers */
	return dco_freq / (p0 * p1 * p2 * 5);
}
1807 
/*
 * Compute the SKL WRPLL (HDMI mode) state for the crtc, then refine
 * port_clock to the rate decoded back from the chosen dividers.
 * Returns the error from skl_ddi_calculate_wrpll() when no dividers fit.
 */
static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct skl_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.skl;
	struct skl_wrpll_params wrpll_params = {};
	int ret;

	ret = skl_ddi_calculate_wrpll(crtc_state->port_clock,
				      display->dpll.ref_clks.nssc, &wrpll_params);
	if (ret)
		return ret;

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	hw_state->ctrl1 =
		DPLL_CTRL1_OVERRIDE(0) |
		DPLL_CTRL1_HDMI_MODE(0);

	hw_state->cfgcr1 =
		DPLL_CFGCR1_FREQ_ENABLE |
		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
		wrpll_params.dco_integer;

	hw_state->cfgcr2 =
		DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
		wrpll_params.central_freq;

	crtc_state->port_clock = skl_ddi_wrpll_get_freq(display, NULL,
							&crtc_state->dpll_hw_state);

	return 0;
}
1845 
/*
 * Encode the DP/eDP link rate into the DPLL_CTRL1 field of the crtc's
 * PLL state.
 */
static int
skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
	struct skl_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.skl;
	u32 ctrl1;

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
	switch (crtc_state->port_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
		break;
	case 135000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
		break;
	case 270000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
		break;
		/* eDP 1.4 rates */
	case 162000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
		break;
	case 108000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
		break;
	case 216000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
		break;
	}

	/*
	 * NOTE(review): an unhandled port_clock silently leaves only the
	 * OVERRIDE bit set (no default case above) - confirm intentional.
	 */
	hw_state->ctrl1 = ctrl1;

	return 0;
}
1883 
1884 static int skl_ddi_lcpll_get_freq(struct intel_display *display,
1885 				  const struct intel_shared_dpll *pll,
1886 				  const struct intel_dpll_hw_state *dpll_hw_state)
1887 {
1888 	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1889 	int link_clock = 0;
1890 
1891 	switch ((hw_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
1892 		DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
1893 	case DPLL_CTRL1_LINK_RATE_810:
1894 		link_clock = 81000;
1895 		break;
1896 	case DPLL_CTRL1_LINK_RATE_1080:
1897 		link_clock = 108000;
1898 		break;
1899 	case DPLL_CTRL1_LINK_RATE_1350:
1900 		link_clock = 135000;
1901 		break;
1902 	case DPLL_CTRL1_LINK_RATE_1620:
1903 		link_clock = 162000;
1904 		break;
1905 	case DPLL_CTRL1_LINK_RATE_2160:
1906 		link_clock = 216000;
1907 		break;
1908 	case DPLL_CTRL1_LINK_RATE_2700:
1909 		link_clock = 270000;
1910 		break;
1911 	default:
1912 		drm_WARN(display->drm, 1, "Unsupported link rate\n");
1913 		break;
1914 	}
1915 
1916 	return link_clock * 2;
1917 }
1918 
1919 static int skl_compute_dpll(struct intel_atomic_state *state,
1920 			    struct intel_crtc *crtc,
1921 			    struct intel_encoder *encoder)
1922 {
1923 	struct intel_crtc_state *crtc_state =
1924 		intel_atomic_get_new_crtc_state(state, crtc);
1925 
1926 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1927 		return skl_ddi_hdmi_pll_dividers(crtc_state);
1928 	else if (intel_crtc_has_dp_encoder(crtc_state))
1929 		return skl_ddi_dp_set_dpll_hw_state(crtc_state);
1930 	else
1931 		return -EINVAL;
1932 }
1933 
1934 static int skl_get_dpll(struct intel_atomic_state *state,
1935 			struct intel_crtc *crtc,
1936 			struct intel_encoder *encoder)
1937 {
1938 	struct intel_crtc_state *crtc_state =
1939 		intel_atomic_get_new_crtc_state(state, crtc);
1940 	struct intel_shared_dpll *pll;
1941 
1942 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1943 		pll = intel_find_shared_dpll(state, crtc,
1944 					     &crtc_state->dpll_hw_state,
1945 					     BIT(DPLL_ID_SKL_DPLL0));
1946 	else
1947 		pll = intel_find_shared_dpll(state, crtc,
1948 					     &crtc_state->dpll_hw_state,
1949 					     BIT(DPLL_ID_SKL_DPLL3) |
1950 					     BIT(DPLL_ID_SKL_DPLL2) |
1951 					     BIT(DPLL_ID_SKL_DPLL1));
1952 	if (!pll)
1953 		return -EINVAL;
1954 
1955 	intel_reference_shared_dpll(state, crtc,
1956 				    pll, &crtc_state->dpll_hw_state);
1957 
1958 	crtc_state->shared_dpll = pll;
1959 
1960 	return 0;
1961 }
1962 
1963 static int skl_ddi_pll_get_freq(struct intel_display *display,
1964 				const struct intel_shared_dpll *pll,
1965 				const struct intel_dpll_hw_state *dpll_hw_state)
1966 {
1967 	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1968 
1969 	/*
1970 	 * ctrl1 register is already shifted for each pll, just use 0 to get
1971 	 * the internal shift for each field
1972 	 */
1973 	if (hw_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
1974 		return skl_ddi_wrpll_get_freq(display, pll, dpll_hw_state);
1975 	else
1976 		return skl_ddi_lcpll_get_freq(display, pll, dpll_hw_state);
1977 }
1978 
/* Cache the non-SSC PLL reference clock rate from the CDCLK state. */
static void skl_update_dpll_ref_clks(struct intel_display *display)
{
	/* No SSC ref */
	display->dpll.ref_clks.nssc = display->cdclk.hw.ref;
}

/* Print the raw SKL PLL register values for debugging. */
static void skl_dump_hw_state(struct drm_printer *p,
			      const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;

	drm_printf(p, "dpll_hw_state: ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
		   hw_state->ctrl1, hw_state->cfgcr1, hw_state->cfgcr2);
}

/* Two SKL PLL states are equal iff all three register values match. */
static bool skl_compare_hw_state(const struct intel_dpll_hw_state *_a,
				 const struct intel_dpll_hw_state *_b)
{
	const struct skl_dpll_hw_state *a = &_a->skl;
	const struct skl_dpll_hw_state *b = &_b->skl;

	return a->ctrl1 == b->ctrl1 &&
		a->cfgcr1 == b->cfgcr1 &&
		a->cfgcr2 == b->cfgcr2;
}
2004 
/* DPLL 1-3: fully programmable SKL PLLs. */
static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
	.enable = skl_ddi_pll_enable,
	.disable = skl_ddi_pll_disable,
	.get_hw_state = skl_ddi_pll_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};

/* DPLL 0: always on (drives CDCLK); only its CTRL1 fields are managed. */
static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
	.enable = skl_ddi_dpll0_enable,
	.disable = skl_ddi_dpll0_disable,
	.get_hw_state = skl_ddi_dpll0_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};

/* All shared DPLLs available on SKL. */
static const struct dpll_info skl_plls[] = {
	{ .name = "DPLL 0", .funcs = &skl_ddi_dpll0_funcs, .id = DPLL_ID_SKL_DPLL0,
	  .always_on = true, },
	{ .name = "DPLL 1", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL1, },
	{ .name = "DPLL 2", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL2, },
	{ .name = "DPLL 3", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL3, },
	{}
};

/* SKL shared DPLL manager. */
static const struct intel_dpll_mgr skl_pll_mgr = {
	.dpll_info = skl_plls,
	.compute_dplls = skl_compute_dpll,
	.get_dplls = skl_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = skl_update_dpll_ref_clks,
	.dump_hw_state = skl_dump_hw_state,
	.compare_hw_state = skl_compare_hw_state,
};
2037 
static void bxt_ddi_pll_enable(struct intel_display *display,
			       struct intel_shared_dpll *pll,
			       const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	enum dpio_phy phy = DPIO_PHY0;
	enum dpio_channel ch = DPIO_CH0;
	u32 temp;

	/*
	 * Program and lock one BXT/GLK port PLL: select the reference,
	 * (GLK only) power the PLL up, write the dividers and loop filter
	 * values from @dpll_hw_state, trigger a recalibration, enable the
	 * PLL and wait for lock, then program the lane staggering.
	 */
	bxt_port_to_phy_channel(display, port, &phy, &ch);

	/* Non-SSC reference */
	intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_REF_SEL);

	if (display->platform.geminilake) {
		/* GLK needs an explicit PLL power enable plus a status poll */
		intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port),
			     0, PORT_PLL_POWER_ENABLE);

		if (wait_for_us((intel_de_read(display, BXT_PORT_PLL_ENABLE(port)) &
				 PORT_PLL_POWER_STATE), 200))
			drm_err(display->drm,
				"Power state not set for PLL:%d\n", port);
	}

	/* Disable 10 bit clock */
	intel_de_rmw(display, BXT_PORT_PLL_EBB_4(phy, ch),
		     PORT_PLL_10BIT_CLK_ENABLE, 0);

	/* Write P1 & P2 */
	intel_de_rmw(display, BXT_PORT_PLL_EBB_0(phy, ch),
		     PORT_PLL_P1_MASK | PORT_PLL_P2_MASK, hw_state->ebb0);

	/* Write M2 integer */
	intel_de_rmw(display, BXT_PORT_PLL(phy, ch, 0),
		     PORT_PLL_M2_INT_MASK, hw_state->pll0);

	/* Write N */
	intel_de_rmw(display, BXT_PORT_PLL(phy, ch, 1),
		     PORT_PLL_N_MASK, hw_state->pll1);

	/* Write M2 fraction */
	intel_de_rmw(display, BXT_PORT_PLL(phy, ch, 2),
		     PORT_PLL_M2_FRAC_MASK, hw_state->pll2);

	/* Write M2 fraction enable */
	intel_de_rmw(display, BXT_PORT_PLL(phy, ch, 3),
		     PORT_PLL_M2_FRAC_ENABLE, hw_state->pll3);

	/* Write coeff */
	temp = intel_de_read(display, BXT_PORT_PLL(phy, ch, 6));
	temp &= ~PORT_PLL_PROP_COEFF_MASK;
	temp &= ~PORT_PLL_INT_COEFF_MASK;
	temp &= ~PORT_PLL_GAIN_CTL_MASK;
	temp |= hw_state->pll6;
	intel_de_write(display, BXT_PORT_PLL(phy, ch, 6), temp);

	/* Write calibration val */
	intel_de_rmw(display, BXT_PORT_PLL(phy, ch, 8),
		     PORT_PLL_TARGET_CNT_MASK, hw_state->pll8);

	/* Write lock threshold */
	intel_de_rmw(display, BXT_PORT_PLL(phy, ch, 9),
		     PORT_PLL_LOCK_THRESHOLD_MASK, hw_state->pll9);

	/* Write DCO amplitude override enable + amplitude */
	temp = intel_de_read(display, BXT_PORT_PLL(phy, ch, 10));
	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
	temp &= ~PORT_PLL_DCO_AMP_MASK;
	temp |= hw_state->pll10;
	intel_de_write(display, BXT_PORT_PLL(phy, ch, 10), temp);

	/* Recalibrate with new settings */
	temp = intel_de_read(display, BXT_PORT_PLL_EBB_4(phy, ch));
	temp |= PORT_PLL_RECALIBRATE;
	intel_de_write(display, BXT_PORT_PLL_EBB_4(phy, ch), temp);
	/* second write restores the 10 bit clock selection from the state */
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	temp |= hw_state->ebb4;
	intel_de_write(display, BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Enable PLL */
	intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_ENABLE);
	intel_de_posting_read(display, BXT_PORT_PLL_ENABLE(port));

	if (wait_for_us((intel_de_read(display, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
			200))
		drm_err(display->drm, "PLL %d not locked\n", port);

	if (display->platform.geminilake) {
		/* NOTE(review): reads lane 0 but writes the whole lane group */
		temp = intel_de_read(display, BXT_PORT_TX_DW5_LN(phy, ch, 0));
		temp |= DCC_DELAY_RANGE_2;
		intel_de_write(display, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
	}

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers and we pick lanes 0/1 for that.
	 */
	temp = intel_de_read(display, BXT_PORT_PCS_DW12_LN01(phy, ch));
	temp &= ~LANE_STAGGER_MASK;
	temp &= ~LANESTAGGER_STRAP_OVRD;
	temp |= hw_state->pcsdw12;
	intel_de_write(display, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
}
2140 
static void bxt_ddi_pll_disable(struct intel_display *display,
				struct intel_shared_dpll *pll)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */

	/* Disable the port PLL; on GLK also power it down afterwards. */
	intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port), PORT_PLL_ENABLE, 0);
	intel_de_posting_read(display, BXT_PORT_PLL_ENABLE(port));

	if (display->platform.geminilake) {
		intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port),
			     PORT_PLL_POWER_ENABLE, 0);

		/* mirrors the power-up poll done in bxt_ddi_pll_enable() */
		if (wait_for_us(!(intel_de_read(display, BXT_PORT_PLL_ENABLE(port)) &
				  PORT_PLL_POWER_STATE), 200))
			drm_err(display->drm,
				"Power state not reset for PLL:%d\n", port);
	}
}
2159 
static bool bxt_ddi_pll_get_hw_state(struct intel_display *display,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *dpll_hw_state)
{
	struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	intel_wakeref_t wakeref;
	enum dpio_phy phy;
	enum dpio_channel ch;
	u32 val;
	bool ret;

	/*
	 * Read the PLL registers back into @dpll_hw_state, masking each
	 * value down to the fields bxt_ddi_pll_enable() programs.
	 * Returns true iff the PLL is currently enabled.
	 */
	bxt_port_to_phy_channel(display, port, &phy, &ch);

	/* Readback requires the display core power domain to be up. */
	wakeref = intel_display_power_get_if_enabled(display,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(display, BXT_PORT_PLL_ENABLE(port));
	if (!(val & PORT_PLL_ENABLE))
		goto out;

	hw_state->ebb0 = intel_de_read(display, BXT_PORT_PLL_EBB_0(phy, ch));
	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;

	hw_state->ebb4 = intel_de_read(display, BXT_PORT_PLL_EBB_4(phy, ch));
	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;

	hw_state->pll0 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 0));
	hw_state->pll0 &= PORT_PLL_M2_INT_MASK;

	hw_state->pll1 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 1));
	hw_state->pll1 &= PORT_PLL_N_MASK;

	hw_state->pll2 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 2));
	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;

	hw_state->pll3 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 3));
	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;

	hw_state->pll6 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 6));
	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
			  PORT_PLL_INT_COEFF_MASK |
			  PORT_PLL_GAIN_CTL_MASK;

	hw_state->pll8 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 8));
	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;

	hw_state->pll9 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 9));
	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;

	hw_state->pll10 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 10));
	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
			   PORT_PLL_DCO_AMP_MASK;

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers. We configure all lanes the same way, so
	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
	 */
	hw_state->pcsdw12 = intel_de_read(display,
					  BXT_PORT_PCS_DW12_LN01(phy, ch));
	if (intel_de_read(display, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
		drm_dbg(display->drm,
			"lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
			hw_state->pcsdw12,
			intel_de_read(display,
				      BXT_PORT_PCS_DW12_LN23(phy, ch)));
	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;

	ret = true;

out:
	intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
2240 
/* pre-calculated values for DP linkrates */
static const struct dpll bxt_dp_clk_val[] = {
	/* m2 is .22 binary fixed point; .dot is the DP link rate in kHz */
	{ .dot = 162000, .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
	{ .dot = 270000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
	{ .dot = 540000, .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
	{ .dot = 216000, .p1 = 3, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
	{ .dot = 243000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6133333 /* 24.3 */ },
	{ .dot = 324000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
	{ .dot = 432000, .p1 = 3, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
};
2252 
2253 static int
2254 bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
2255 			  struct dpll *clk_div)
2256 {
2257 	struct intel_display *display = to_intel_display(crtc_state);
2258 
2259 	/* Calculate HDMI div */
2260 	/*
2261 	 * FIXME: tie the following calculation into
2262 	 * i9xx_crtc_compute_clock
2263 	 */
2264 	if (!bxt_find_best_dpll(crtc_state, clk_div))
2265 		return -EINVAL;
2266 
2267 	drm_WARN_ON(display->drm, clk_div->m1 != 2);
2268 
2269 	return 0;
2270 }
2271 
2272 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
2273 				    struct dpll *clk_div)
2274 {
2275 	struct intel_display *display = to_intel_display(crtc_state);
2276 	int i;
2277 
2278 	*clk_div = bxt_dp_clk_val[0];
2279 	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
2280 		if (crtc_state->port_clock == bxt_dp_clk_val[i].dot) {
2281 			*clk_div = bxt_dp_clk_val[i];
2282 			break;
2283 		}
2284 	}
2285 
2286 	chv_calc_dpll_params(display->dpll.ref_clks.nssc, clk_div);
2287 
2288 	drm_WARN_ON(display->drm, clk_div->vco == 0 ||
2289 		    clk_div->dot != crtc_state->port_clock);
2290 }
2291 
2292 static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
2293 				     const struct dpll *clk_div)
2294 {
2295 	struct intel_display *display = to_intel_display(crtc_state);
2296 	struct bxt_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.bxt;
2297 	int clock = crtc_state->port_clock;
2298 	int vco = clk_div->vco;
2299 	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
2300 	u32 lanestagger;
2301 
2302 	if (vco >= 6200000 && vco <= 6700000) {
2303 		prop_coef = 4;
2304 		int_coef = 9;
2305 		gain_ctl = 3;
2306 		targ_cnt = 8;
2307 	} else if ((vco > 5400000 && vco < 6200000) ||
2308 			(vco >= 4800000 && vco < 5400000)) {
2309 		prop_coef = 5;
2310 		int_coef = 11;
2311 		gain_ctl = 3;
2312 		targ_cnt = 9;
2313 	} else if (vco == 5400000) {
2314 		prop_coef = 3;
2315 		int_coef = 8;
2316 		gain_ctl = 1;
2317 		targ_cnt = 9;
2318 	} else {
2319 		drm_err(display->drm, "Invalid VCO\n");
2320 		return -EINVAL;
2321 	}
2322 
2323 	if (clock > 270000)
2324 		lanestagger = 0x18;
2325 	else if (clock > 135000)
2326 		lanestagger = 0x0d;
2327 	else if (clock > 67000)
2328 		lanestagger = 0x07;
2329 	else if (clock > 33000)
2330 		lanestagger = 0x04;
2331 	else
2332 		lanestagger = 0x02;
2333 
2334 	hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
2335 	hw_state->pll0 = PORT_PLL_M2_INT(clk_div->m2 >> 22);
2336 	hw_state->pll1 = PORT_PLL_N(clk_div->n);
2337 	hw_state->pll2 = PORT_PLL_M2_FRAC(clk_div->m2 & 0x3fffff);
2338 
2339 	if (clk_div->m2 & 0x3fffff)
2340 		hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
2341 
2342 	hw_state->pll6 = PORT_PLL_PROP_COEFF(prop_coef) |
2343 		PORT_PLL_INT_COEFF(int_coef) |
2344 		PORT_PLL_GAIN_CTL(gain_ctl);
2345 
2346 	hw_state->pll8 = PORT_PLL_TARGET_CNT(targ_cnt);
2347 
2348 	hw_state->pll9 = PORT_PLL_LOCK_THRESHOLD(5);
2349 
2350 	hw_state->pll10 = PORT_PLL_DCO_AMP(15) |
2351 		PORT_PLL_DCO_AMP_OVR_EN_H;
2352 
2353 	hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
2354 
2355 	hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
2356 
2357 	return 0;
2358 }
2359 
2360 static int bxt_ddi_pll_get_freq(struct intel_display *display,
2361 				const struct intel_shared_dpll *pll,
2362 				const struct intel_dpll_hw_state *dpll_hw_state)
2363 {
2364 	const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
2365 	struct dpll clock;
2366 
2367 	clock.m1 = 2;
2368 	clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, hw_state->pll0) << 22;
2369 	if (hw_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
2370 		clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK,
2371 					  hw_state->pll2);
2372 	clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, hw_state->pll1);
2373 	clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, hw_state->ebb0);
2374 	clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, hw_state->ebb0);
2375 
2376 	return chv_calc_dpll_params(display->dpll.ref_clks.nssc, &clock);
2377 }
2378 
2379 static int
2380 bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2381 {
2382 	struct dpll clk_div = {};
2383 
2384 	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
2385 
2386 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2387 }
2388 
2389 static int
2390 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2391 {
2392 	struct intel_display *display = to_intel_display(crtc_state);
2393 	struct dpll clk_div = {};
2394 	int ret;
2395 
2396 	bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
2397 
2398 	ret = bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2399 	if (ret)
2400 		return ret;
2401 
2402 	crtc_state->port_clock = bxt_ddi_pll_get_freq(display, NULL,
2403 						      &crtc_state->dpll_hw_state);
2404 
2405 	return 0;
2406 }
2407 
2408 static int bxt_compute_dpll(struct intel_atomic_state *state,
2409 			    struct intel_crtc *crtc,
2410 			    struct intel_encoder *encoder)
2411 {
2412 	struct intel_crtc_state *crtc_state =
2413 		intel_atomic_get_new_crtc_state(state, crtc);
2414 
2415 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
2416 		return bxt_ddi_hdmi_set_dpll_hw_state(crtc_state);
2417 	else if (intel_crtc_has_dp_encoder(crtc_state))
2418 		return bxt_ddi_dp_set_dpll_hw_state(crtc_state);
2419 	else
2420 		return -EINVAL;
2421 }
2422 
2423 static int bxt_get_dpll(struct intel_atomic_state *state,
2424 			struct intel_crtc *crtc,
2425 			struct intel_encoder *encoder)
2426 {
2427 	struct intel_display *display = to_intel_display(state);
2428 	struct intel_crtc_state *crtc_state =
2429 		intel_atomic_get_new_crtc_state(state, crtc);
2430 	struct intel_shared_dpll *pll;
2431 	enum intel_dpll_id id;
2432 
2433 	/* 1:1 mapping between ports and PLLs */
2434 	id = (enum intel_dpll_id) encoder->port;
2435 	pll = intel_get_shared_dpll_by_id(display, id);
2436 
2437 	drm_dbg_kms(display->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
2438 		    crtc->base.base.id, crtc->base.name, pll->info->name);
2439 
2440 	intel_reference_shared_dpll(state, crtc,
2441 				    pll, &crtc_state->dpll_hw_state);
2442 
2443 	crtc_state->shared_dpll = pll;
2444 
2445 	return 0;
2446 }
2447 
2448 static void bxt_update_dpll_ref_clks(struct intel_display *display)
2449 {
2450 	display->dpll.ref_clks.ssc = 100000;
2451 	display->dpll.ref_clks.nssc = 100000;
2452 	/* DSI non-SSC ref 19.2MHz */
2453 }
2454 
2455 static void bxt_dump_hw_state(struct drm_printer *p,
2456 			      const struct intel_dpll_hw_state *dpll_hw_state)
2457 {
2458 	const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
2459 
2460 	drm_printf(p, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
2461 		   "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
2462 		   "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
2463 		   hw_state->ebb0, hw_state->ebb4,
2464 		   hw_state->pll0, hw_state->pll1, hw_state->pll2, hw_state->pll3,
2465 		   hw_state->pll6, hw_state->pll8, hw_state->pll9, hw_state->pll10,
2466 		   hw_state->pcsdw12);
2467 }
2468 
2469 static bool bxt_compare_hw_state(const struct intel_dpll_hw_state *_a,
2470 				 const struct intel_dpll_hw_state *_b)
2471 {
2472 	const struct bxt_dpll_hw_state *a = &_a->bxt;
2473 	const struct bxt_dpll_hw_state *b = &_b->bxt;
2474 
2475 	return a->ebb0 == b->ebb0 &&
2476 		a->ebb4 == b->ebb4 &&
2477 		a->pll0 == b->pll0 &&
2478 		a->pll1 == b->pll1 &&
2479 		a->pll2 == b->pll2 &&
2480 		a->pll3 == b->pll3 &&
2481 		a->pll6 == b->pll6 &&
2482 		a->pll8 == b->pll8 &&
2483 		a->pll10 == b->pll10 &&
2484 		a->pcsdw12 == b->pcsdw12;
2485 }
2486 
/* Control hooks shared by all BXT/GLK port PLLs. */
static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
	.enable = bxt_ddi_pll_enable,
	.disable = bxt_ddi_pll_disable,
	.get_hw_state = bxt_ddi_pll_get_hw_state,
	.get_freq = bxt_ddi_pll_get_freq,
};
2493 
2494 static const struct dpll_info bxt_plls[] = {
2495 	{ .name = "PORT PLL A", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL0, },
2496 	{ .name = "PORT PLL B", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL1, },
2497 	{ .name = "PORT PLL C", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL2, },
2498 	{}
2499 };
2500 
/* BXT/GLK DPLL manager: binds the port PLL table to the BXT hooks. */
static const struct intel_dpll_mgr bxt_pll_mgr = {
	.dpll_info = bxt_plls,
	.compute_dplls = bxt_compute_dpll,
	.get_dplls = bxt_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = bxt_update_dpll_ref_clks,
	.dump_hw_state = bxt_dump_hw_state,
	.compare_hw_state = bxt_compare_hw_state,
};
2510 
static void icl_wrpll_get_multipliers(int bestdiv, int *pdiv,
				      int *qdiv, int *kdiv)
{
	/*
	 * Split the overall post divider into the P/Q/K stages. Inputs come
	 * from the divider table in icl_calc_wrpll(); the first matching
	 * factorization below is used.
	 */
	if (bestdiv % 2 != 0) {
		/* odd dividers */
		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
			*pdiv = bestdiv;
			*qdiv = 1;
			*kdiv = 1;
		} else { /* 9, 15, 21 */
			*pdiv = bestdiv / 3;
			*qdiv = 1;
			*kdiv = 3;
		}
		return;
	}

	/* even dividers */
	if (bestdiv == 2) {
		*pdiv = 2;
		*qdiv = 1;
		*kdiv = 1;
	} else if (bestdiv % 4 == 0) {
		*pdiv = 2;
		*qdiv = bestdiv / 4;
		*kdiv = 2;
	} else if (bestdiv % 6 == 0) {
		*pdiv = 3;
		*qdiv = bestdiv / 6;
		*kdiv = 2;
	} else if (bestdiv % 5 == 0) {
		*pdiv = 5;
		*qdiv = bestdiv / 10;
		*kdiv = 2;
	} else if (bestdiv % 14 == 0) {
		*pdiv = 7;
		*qdiv = bestdiv / 14;
		*kdiv = 2;
	}
}
2549 
static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
				      u32 dco_freq, u32 ref_freq,
				      int pdiv, int qdiv, int kdiv)
{
	u32 dco;

	/*
	 * Convert the logical divider values and the target DCO frequency
	 * into the register encodings stored in @params.
	 */
	switch (kdiv) {
	case 1:
		params->kdiv = 1;
		break;
	case 2:
		params->kdiv = 2;
		break;
	case 3:
		/* a K divider of 3 is encoded as 4 */
		params->kdiv = 4;
		break;
	default:
		WARN(1, "Incorrect KDiv\n");
	}

	switch (pdiv) {
	case 2:
		params->pdiv = 1;
		break;
	case 3:
		params->pdiv = 2;
		break;
	case 5:
		params->pdiv = 4;
		break;
	case 7:
		params->pdiv = 8;
		break;
	default:
		WARN(1, "Incorrect PDiv\n");
	}

	/* qdiv != 1 is only valid together with kdiv == 2 */
	WARN_ON(kdiv != 2 && qdiv != 1);

	params->qdiv_ratio = qdiv;
	params->qdiv_mode = (qdiv == 1) ? 0 : 1;

	/* DCO/ref ratio in .15 binary fixed point */
	dco = div_u64((u64)dco_freq << 15, ref_freq);

	params->dco_integer = dco >> 15;
	params->dco_fraction = dco & 0x7fff;
}
2597 
2598 /*
2599  * Display WA #22010492432: ehl, tgl, adl-s, adl-p
2600  * Program half of the nominal DCO divider fraction value.
2601  */
2602 static bool
2603 ehl_combo_pll_div_frac_wa_needed(struct intel_display *display)
2604 {
2605 	return ((display->platform.elkhartlake &&
2606 		 IS_DISPLAY_STEP(display, STEP_B0, STEP_FOREVER)) ||
2607 		DISPLAY_VER(display) >= 12) &&
2608 		display->dpll.ref_clks.nssc == 38400;
2609 }
2610 
/* Pairs a port clock with its pre-computed combo PLL settings. */
struct icl_combo_pll_params {
	int clock;	/* port clock in kHz */
	struct skl_wrpll_params wrpll;
};
2615 
/*
 * These values are already adjusted: they're the bits we write to the
 * registers, not the logical values.
 */
2620 static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
2621 	{ 540000,
2622 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
2623 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2624 	{ 270000,
2625 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
2626 	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2627 	{ 162000,
2628 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
2629 	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2630 	{ 324000,
2631 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
2632 	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2633 	{ 216000,
2634 	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2635 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2636 	{ 432000,
2637 	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2638 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2639 	{ 648000,
2640 	  { .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
2641 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2642 	{ 810000,
2643 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
2644 	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2645 };
2646 
2647 
/* DP link rate (kHz) -> combo PLL settings, 19.2 MHz reference clock. */
/* Also used for 38.4 MHz values. */
static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
	{ 540000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 270000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 162000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 324000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 216000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
	{ 432000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 648000,
	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 810000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};
2675 
/* ICL TBT PLL settings for a 24 MHz reference clock. */
static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
	.dco_integer = 0x151, .dco_fraction = 0x4000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};

/* ICL TBT PLL settings for 19.2 MHz (also used for 38.4 MHz). */
static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};

/* TGL TBT PLL settings for 19.2 MHz (also used for 38.4 MHz). */
static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x54, .dco_fraction = 0x3000,
	/* the following params are unused */
	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
};

/* TGL TBT PLL settings for a 24 MHz reference clock. */
static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
	.dco_integer = 0x43, .dco_fraction = 0x4000,
	/* the following params are unused */
};
2696 
2697 static int icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2698 				 struct skl_wrpll_params *pll_params)
2699 {
2700 	struct intel_display *display = to_intel_display(crtc_state);
2701 	const struct icl_combo_pll_params *params =
2702 		display->dpll.ref_clks.nssc == 24000 ?
2703 		icl_dp_combo_pll_24MHz_values :
2704 		icl_dp_combo_pll_19_2MHz_values;
2705 	int clock = crtc_state->port_clock;
2706 	int i;
2707 
2708 	for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
2709 		if (clock == params[i].clock) {
2710 			*pll_params = params[i].wrpll;
2711 			return 0;
2712 		}
2713 	}
2714 
2715 	MISSING_CASE(clock);
2716 	return -EINVAL;
2717 }
2718 
2719 static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
2720 			    struct skl_wrpll_params *pll_params)
2721 {
2722 	struct intel_display *display = to_intel_display(crtc_state);
2723 
2724 	if (DISPLAY_VER(display) >= 12) {
2725 		switch (display->dpll.ref_clks.nssc) {
2726 		default:
2727 			MISSING_CASE(display->dpll.ref_clks.nssc);
2728 			fallthrough;
2729 		case 19200:
2730 		case 38400:
2731 			*pll_params = tgl_tbt_pll_19_2MHz_values;
2732 			break;
2733 		case 24000:
2734 			*pll_params = tgl_tbt_pll_24MHz_values;
2735 			break;
2736 		}
2737 	} else {
2738 		switch (display->dpll.ref_clks.nssc) {
2739 		default:
2740 			MISSING_CASE(display->dpll.ref_clks.nssc);
2741 			fallthrough;
2742 		case 19200:
2743 		case 38400:
2744 			*pll_params = icl_tbt_pll_19_2MHz_values;
2745 			break;
2746 		case 24000:
2747 			*pll_params = icl_tbt_pll_24MHz_values;
2748 			break;
2749 		}
2750 	}
2751 
2752 	return 0;
2753 }
2754 
static int icl_ddi_tbt_pll_get_freq(struct intel_display *display,
				    const struct intel_shared_dpll *pll,
				    const struct intel_dpll_hw_state *dpll_hw_state)
{
	/*
	 * The PLL outputs multiple frequencies at the same time, selection is
	 * made at DDI clock mux level.
	 */
	drm_WARN_ON(display->drm, 1);	/* callers should never ask for a single freq */

	return 0;
}
2767 
2768 static int icl_wrpll_ref_clock(struct intel_display *display)
2769 {
2770 	int ref_clock = display->dpll.ref_clks.nssc;
2771 
2772 	/*
2773 	 * For ICL+, the spec states: if reference frequency is 38.4,
2774 	 * use 19.2 because the DPLL automatically divides that by 2.
2775 	 */
2776 	if (ref_clock == 38400)
2777 		ref_clock = 19200;
2778 
2779 	return ref_clock;
2780 }
2781 
2782 static int
2783 icl_calc_wrpll(struct intel_crtc_state *crtc_state,
2784 	       struct skl_wrpll_params *wrpll_params)
2785 {
2786 	struct intel_display *display = to_intel_display(crtc_state);
2787 	int ref_clock = icl_wrpll_ref_clock(display);
2788 	u32 afe_clock = crtc_state->port_clock * 5;
2789 	u32 dco_min = 7998000;
2790 	u32 dco_max = 10000000;
2791 	u32 dco_mid = (dco_min + dco_max) / 2;
2792 	static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
2793 					 18, 20, 24, 28, 30, 32,  36,  40,
2794 					 42, 44, 48, 50, 52, 54,  56,  60,
2795 					 64, 66, 68, 70, 72, 76,  78,  80,
2796 					 84, 88, 90, 92, 96, 98, 100, 102,
2797 					  3,  5,  7,  9, 15, 21 };
2798 	u32 dco, best_dco = 0, dco_centrality = 0;
2799 	u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2800 	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2801 
2802 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2803 		dco = afe_clock * dividers[d];
2804 
2805 		if (dco <= dco_max && dco >= dco_min) {
2806 			dco_centrality = abs(dco - dco_mid);
2807 
2808 			if (dco_centrality < best_dco_centrality) {
2809 				best_dco_centrality = dco_centrality;
2810 				best_div = dividers[d];
2811 				best_dco = dco;
2812 			}
2813 		}
2814 	}
2815 
2816 	if (best_div == 0)
2817 		return -EINVAL;
2818 
2819 	icl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2820 	icl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
2821 				  pdiv, qdiv, kdiv);
2822 
2823 	return 0;
2824 }
2825 
static int icl_ddi_combo_pll_get_freq(struct intel_display *display,
				      const struct intel_shared_dpll *pll,
				      const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
	int ref_clock = icl_wrpll_ref_clock(display);
	u32 dco_fraction;
	u32 p0, p1, p2, dco_freq;

	/*
	 * Derive the port clock (kHz) from CFGCR0/CFGCR1: decode the
	 * P/Q/K dividers and the DCO integer/fraction, then divide the
	 * DCO frequency back down.
	 */
	p0 = hw_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
	p2 = hw_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;

	/* the Q divider only takes effect when qdiv_mode is set */
	if (hw_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
		p1 = (hw_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
			DPLL_CFGCR1_QDIV_RATIO_SHIFT;
	else
		p1 = 1;

	/* translate the PDIV register encoding into a divider value */
	switch (p0) {
	case DPLL_CFGCR1_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR1_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR1_PDIV_5:
		p0 = 5;
		break;
	case DPLL_CFGCR1_PDIV_7:
		p0 = 7;
		break;
	}

	/* translate the KDIV register encoding into a divider value */
	switch (p2) {
	case DPLL_CFGCR1_KDIV_1:
		p2 = 1;
		break;
	case DPLL_CFGCR1_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR1_KDIV_3:
		p2 = 3;
		break;
	}

	dco_freq = (hw_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
		   ref_clock;

	dco_fraction = (hw_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
		       DPLL_CFGCR0_DCO_FRACTION_SHIFT;

	/* Display WA #22010492432: the register holds half the fraction */
	if (ehl_combo_pll_div_frac_wa_needed(display))
		dco_fraction *= 2;

	/* fraction is in 1/0x8000 units of the reference clock */
	dco_freq += (dco_fraction * ref_clock) / 0x8000;

	/* unrecognized divider encodings leave p0/p2 as 0 -> bail out */
	if (drm_WARN_ON(display->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	/* AFE clock is 5x the port clock (see icl_calc_wrpll()) */
	return dco_freq / (p0 * p1 * p2 * 5);
}
2887 
2888 static void icl_calc_dpll_state(struct intel_display *display,
2889 				const struct skl_wrpll_params *pll_params,
2890 				struct intel_dpll_hw_state *dpll_hw_state)
2891 {
2892 	struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
2893 	u32 dco_fraction = pll_params->dco_fraction;
2894 
2895 	if (ehl_combo_pll_div_frac_wa_needed(display))
2896 		dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);
2897 
2898 	hw_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
2899 			    pll_params->dco_integer;
2900 
2901 	hw_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
2902 			    DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
2903 			    DPLL_CFGCR1_KDIV(pll_params->kdiv) |
2904 			    DPLL_CFGCR1_PDIV(pll_params->pdiv);
2905 
2906 	if (DISPLAY_VER(display) >= 12)
2907 		hw_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
2908 	else
2909 		hw_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
2910 
2911 	if (display->vbt.override_afc_startup)
2912 		hw_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(display->vbt.override_afc_startup_val);
2913 }
2914 
/*
 * Find DCO dividers (div1 from the fixed list, div2 counting down from 10)
 * that put the MG/DKL PHY PLL DCO frequency in range for @clock_khz, and
 * fill the refclkin/clktop2 register values in @hw_state accordingly.
 * Returns 0 on success, -EINVAL if no divider combination lands in range.
 */
static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
				    u32 *target_dco_khz,
				    struct icl_dpll_hw_state *hw_state,
				    bool is_dkl)
{
	static const u8 div1_vals[] = { 7, 5, 3, 2 };
	u32 dco_min_freq, dco_max_freq;
	unsigned int i;
	int div2;

	/* DP pins the DCO at 8.1 GHz; otherwise a range is allowed */
	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
	dco_max_freq = is_dp ? 8100000 : 10000000;

	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
		int div1 = div1_vals[i];

		for (div2 = 10; div2 > 0; div2--) {
			int dco = div1 * div2 * clock_khz * 5;
			int a_divratio, tlinedrv, inputsel;
			u32 hsdiv;

			if (dco < dco_min_freq || dco > dco_max_freq)
				continue;

			if (div2 >= 2) {
				/*
				 * Note: a_divratio not matching TGL BSpec
				 * algorithm but matching hardcoded values and
				 * working on HW for DP alt-mode at least
				 */
				a_divratio = is_dp ? 10 : 5;
				tlinedrv = is_dkl ? 1 : 2;
			} else {
				a_divratio = 5;
				tlinedrv = 0;
			}
			inputsel = is_dp ? 0 : 1;

			switch (div1) {
			default:
				MISSING_CASE(div1);
				fallthrough;
			case 2:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
				break;
			case 3:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
				break;
			case 5:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
				break;
			case 7:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
				break;
			}

			*target_dco_khz = dco;

			hw_state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);

			hw_state->mg_clktop2_coreclkctl1 =
				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);

			hw_state->mg_clktop2_hsclkctl =
				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
				hsdiv |
				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);

			return 0;
		}
	}

	return -EINVAL;
}
2990 
2991 /*
2992  * The specification for this function uses real numbers, so the math had to be
2993  * adapted to integer-only calculation, that's why it looks so different.
2994  */
2995 static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
2996 				 struct intel_dpll_hw_state *dpll_hw_state)
2997 {
2998 	struct intel_display *display = to_intel_display(crtc_state);
2999 	struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3000 	int refclk_khz = display->dpll.ref_clks.nssc;
3001 	int clock = crtc_state->port_clock;
3002 	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
3003 	u32 iref_ndiv, iref_trim, iref_pulse_w;
3004 	u32 prop_coeff, int_coeff;
3005 	u32 tdc_targetcnt, feedfwgain;
3006 	u64 ssc_stepsize, ssc_steplen, ssc_steplog;
3007 	u64 tmp;
3008 	bool use_ssc = false;
3009 	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
3010 	bool is_dkl = DISPLAY_VER(display) >= 12;
3011 	int ret;
3012 
3013 	ret = icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
3014 				       hw_state, is_dkl);
3015 	if (ret)
3016 		return ret;
3017 
3018 	m1div = 2;
3019 	m2div_int = dco_khz / (refclk_khz * m1div);
3020 	if (m2div_int > 255) {
3021 		if (!is_dkl) {
3022 			m1div = 4;
3023 			m2div_int = dco_khz / (refclk_khz * m1div);
3024 		}
3025 
3026 		if (m2div_int > 255)
3027 			return -EINVAL;
3028 	}
3029 	m2div_rem = dco_khz % (refclk_khz * m1div);
3030 
3031 	tmp = (u64)m2div_rem * (1 << 22);
3032 	do_div(tmp, refclk_khz * m1div);
3033 	m2div_frac = tmp;
3034 
3035 	switch (refclk_khz) {
3036 	case 19200:
3037 		iref_ndiv = 1;
3038 		iref_trim = 28;
3039 		iref_pulse_w = 1;
3040 		break;
3041 	case 24000:
3042 		iref_ndiv = 1;
3043 		iref_trim = 25;
3044 		iref_pulse_w = 2;
3045 		break;
3046 	case 38400:
3047 		iref_ndiv = 2;
3048 		iref_trim = 28;
3049 		iref_pulse_w = 1;
3050 		break;
3051 	default:
3052 		MISSING_CASE(refclk_khz);
3053 		return -EINVAL;
3054 	}
3055 
3056 	/*
3057 	 * tdc_res = 0.000003
3058 	 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
3059 	 *
3060 	 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
3061 	 * was supposed to be a division, but we rearranged the operations of
3062 	 * the formula to avoid early divisions so we don't multiply the
3063 	 * rounding errors.
3064 	 *
3065 	 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
3066 	 * we also rearrange to work with integers.
3067 	 *
3068 	 * The 0.5 transformed to 5 results in a multiplication by 10 and the
3069 	 * last division by 10.
3070 	 */
3071 	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
3072 
3073 	/*
3074 	 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
3075 	 * 32 bits. That's not a problem since we round the division down
3076 	 * anyway.
3077 	 */
3078 	feedfwgain = (use_ssc || m2div_rem > 0) ?
3079 		m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
3080 
3081 	if (dco_khz >= 9000000) {
3082 		prop_coeff = 5;
3083 		int_coeff = 10;
3084 	} else {
3085 		prop_coeff = 4;
3086 		int_coeff = 8;
3087 	}
3088 
3089 	if (use_ssc) {
3090 		tmp = mul_u32_u32(dco_khz, 47 * 32);
3091 		do_div(tmp, refclk_khz * m1div * 10000);
3092 		ssc_stepsize = tmp;
3093 
3094 		tmp = mul_u32_u32(dco_khz, 1000);
3095 		ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
3096 	} else {
3097 		ssc_stepsize = 0;
3098 		ssc_steplen = 0;
3099 	}
3100 	ssc_steplog = 4;
3101 
3102 	/* write pll_state calculations */
3103 	if (is_dkl) {
3104 		hw_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
3105 					 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
3106 					 DKL_PLL_DIV0_FBPREDIV(m1div) |
3107 					 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
3108 		if (display->vbt.override_afc_startup) {
3109 			u8 val = display->vbt.override_afc_startup_val;
3110 
3111 			hw_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val);
3112 		}
3113 
3114 		hw_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
3115 					 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
3116 
3117 		hw_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
3118 					DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
3119 					DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
3120 					(use_ssc ? DKL_PLL_SSC_EN : 0);
3121 
3122 		hw_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
3123 					  DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
3124 
3125 		hw_state->mg_pll_tdc_coldst_bias =
3126 				DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
3127 				DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
3128 
3129 	} else {
3130 		hw_state->mg_pll_div0 =
3131 			(m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
3132 			MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
3133 			MG_PLL_DIV0_FBDIV_INT(m2div_int);
3134 
3135 		hw_state->mg_pll_div1 =
3136 			MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
3137 			MG_PLL_DIV1_DITHER_DIV_2 |
3138 			MG_PLL_DIV1_NDIVRATIO(1) |
3139 			MG_PLL_DIV1_FBPREDIV(m1div);
3140 
3141 		hw_state->mg_pll_lf =
3142 			MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
3143 			MG_PLL_LF_AFCCNTSEL_512 |
3144 			MG_PLL_LF_GAINCTRL(1) |
3145 			MG_PLL_LF_INT_COEFF(int_coeff) |
3146 			MG_PLL_LF_PROP_COEFF(prop_coeff);
3147 
3148 		hw_state->mg_pll_frac_lock =
3149 			MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
3150 			MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
3151 			MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
3152 			MG_PLL_FRAC_LOCK_DCODITHEREN |
3153 			MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
3154 		if (use_ssc || m2div_rem > 0)
3155 			hw_state->mg_pll_frac_lock |=
3156 				MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
3157 
3158 		hw_state->mg_pll_ssc =
3159 			(use_ssc ? MG_PLL_SSC_EN : 0) |
3160 			MG_PLL_SSC_TYPE(2) |
3161 			MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
3162 			MG_PLL_SSC_STEPNUM(ssc_steplog) |
3163 			MG_PLL_SSC_FLLEN |
3164 			MG_PLL_SSC_STEPSIZE(ssc_stepsize);
3165 
3166 		hw_state->mg_pll_tdc_coldst_bias =
3167 			MG_PLL_TDC_COLDST_COLDSTART |
3168 			MG_PLL_TDC_COLDST_IREFINT_EN |
3169 			MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
3170 			MG_PLL_TDC_TDCOVCCORR_EN |
3171 			MG_PLL_TDC_TDCSEL(3);
3172 
3173 		hw_state->mg_pll_bias =
3174 			MG_PLL_BIAS_BIAS_GB_SEL(3) |
3175 			MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
3176 			MG_PLL_BIAS_BIAS_BONUS(10) |
3177 			MG_PLL_BIAS_BIASCAL_EN |
3178 			MG_PLL_BIAS_CTRIM(12) |
3179 			MG_PLL_BIAS_VREF_RDAC(4) |
3180 			MG_PLL_BIAS_IREFTRIM(iref_trim);
3181 
3182 		if (refclk_khz == 38400) {
3183 			hw_state->mg_pll_tdc_coldst_bias_mask =
3184 				MG_PLL_TDC_COLDST_COLDSTART;
3185 			hw_state->mg_pll_bias_mask = 0;
3186 		} else {
3187 			hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
3188 			hw_state->mg_pll_bias_mask = -1U;
3189 		}
3190 
3191 		hw_state->mg_pll_tdc_coldst_bias &=
3192 			hw_state->mg_pll_tdc_coldst_bias_mask;
3193 		hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
3194 	}
3195 
3196 	return 0;
3197 }
3198 
3199 static int icl_ddi_mg_pll_get_freq(struct intel_display *display,
3200 				   const struct intel_shared_dpll *pll,
3201 				   const struct intel_dpll_hw_state *dpll_hw_state)
3202 {
3203 	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3204 	u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
3205 	u64 tmp;
3206 
3207 	ref_clock = display->dpll.ref_clks.nssc;
3208 
3209 	if (DISPLAY_VER(display) >= 12) {
3210 		m1 = hw_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
3211 		m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
3212 		m2_int = hw_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
3213 
3214 		if (hw_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
3215 			m2_frac = hw_state->mg_pll_bias &
3216 				  DKL_PLL_BIAS_FBDIV_FRAC_MASK;
3217 			m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
3218 		} else {
3219 			m2_frac = 0;
3220 		}
3221 	} else {
3222 		m1 = hw_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
3223 		m2_int = hw_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
3224 
3225 		if (hw_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
3226 			m2_frac = hw_state->mg_pll_div0 &
3227 				  MG_PLL_DIV0_FBDIV_FRAC_MASK;
3228 			m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
3229 		} else {
3230 			m2_frac = 0;
3231 		}
3232 	}
3233 
3234 	switch (hw_state->mg_clktop2_hsclkctl &
3235 		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
3236 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
3237 		div1 = 2;
3238 		break;
3239 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
3240 		div1 = 3;
3241 		break;
3242 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
3243 		div1 = 5;
3244 		break;
3245 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
3246 		div1 = 7;
3247 		break;
3248 	default:
3249 		MISSING_CASE(hw_state->mg_clktop2_hsclkctl);
3250 		return 0;
3251 	}
3252 
3253 	div2 = (hw_state->mg_clktop2_hsclkctl &
3254 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
3255 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;
3256 
3257 	/* div2 value of 0 is same as 1 means no div */
3258 	if (div2 == 0)
3259 		div2 = 1;
3260 
3261 	/*
3262 	 * Adjust the original formula to delay the division by 2^22 in order to
3263 	 * minimize possible rounding errors.
3264 	 */
3265 	tmp = (u64)m1 * m2_int * ref_clock +
3266 	      (((u64)m1 * m2_frac * ref_clock) >> 22);
3267 	tmp = div_u64(tmp, 5 * div1 * div2);
3268 
3269 	return tmp;
3270 }
3271 
3272 /**
3273  * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
3274  * @crtc_state: state for the CRTC to select the DPLL for
3275  * @port_dpll_id: the active @port_dpll_id to select
3276  *
3277  * Select the given @port_dpll_id instance from the DPLLs reserved for the
3278  * CRTC.
3279  */
3280 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
3281 			      enum icl_port_dpll_id port_dpll_id)
3282 {
3283 	struct icl_port_dpll *port_dpll =
3284 		&crtc_state->icl_port_dplls[port_dpll_id];
3285 
3286 	crtc_state->shared_dpll = port_dpll->pll;
3287 	crtc_state->dpll_hw_state = port_dpll->hw_state;
3288 }
3289 
3290 static void icl_update_active_dpll(struct intel_atomic_state *state,
3291 				   struct intel_crtc *crtc,
3292 				   struct intel_encoder *encoder)
3293 {
3294 	struct intel_crtc_state *crtc_state =
3295 		intel_atomic_get_new_crtc_state(state, crtc);
3296 	struct intel_digital_port *primary_port;
3297 	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3298 
3299 	primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3300 		enc_to_mst(encoder)->primary :
3301 		enc_to_dig_port(encoder);
3302 
3303 	if (primary_port &&
3304 	    (intel_tc_port_in_dp_alt_mode(primary_port) ||
3305 	     intel_tc_port_in_legacy_mode(primary_port)))
3306 		port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3307 
3308 	icl_set_active_port_dpll(crtc_state, port_dpll_id);
3309 }
3310 
3311 static int icl_compute_combo_phy_dpll(struct intel_atomic_state *state,
3312 				      struct intel_crtc *crtc)
3313 {
3314 	struct intel_display *display = to_intel_display(state);
3315 	struct intel_crtc_state *crtc_state =
3316 		intel_atomic_get_new_crtc_state(state, crtc);
3317 	struct icl_port_dpll *port_dpll =
3318 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3319 	struct skl_wrpll_params pll_params = {};
3320 	int ret;
3321 
3322 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
3323 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
3324 		ret = icl_calc_wrpll(crtc_state, &pll_params);
3325 	else
3326 		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
3327 
3328 	if (ret)
3329 		return ret;
3330 
3331 	icl_calc_dpll_state(display, &pll_params, &port_dpll->hw_state);
3332 
3333 	/* this is mainly for the fastset check */
3334 	icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
3335 
3336 	crtc_state->port_clock = icl_ddi_combo_pll_get_freq(display, NULL,
3337 							    &port_dpll->hw_state);
3338 
3339 	return 0;
3340 }
3341 
3342 static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
3343 				  struct intel_crtc *crtc,
3344 				  struct intel_encoder *encoder)
3345 {
3346 	struct intel_display *display = to_intel_display(crtc);
3347 	struct intel_crtc_state *crtc_state =
3348 		intel_atomic_get_new_crtc_state(state, crtc);
3349 	struct icl_port_dpll *port_dpll =
3350 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3351 	enum port port = encoder->port;
3352 	unsigned long dpll_mask;
3353 
3354 	if (display->platform.alderlake_s) {
3355 		dpll_mask =
3356 			BIT(DPLL_ID_DG1_DPLL3) |
3357 			BIT(DPLL_ID_DG1_DPLL2) |
3358 			BIT(DPLL_ID_ICL_DPLL1) |
3359 			BIT(DPLL_ID_ICL_DPLL0);
3360 	} else if (display->platform.dg1) {
3361 		if (port == PORT_D || port == PORT_E) {
3362 			dpll_mask =
3363 				BIT(DPLL_ID_DG1_DPLL2) |
3364 				BIT(DPLL_ID_DG1_DPLL3);
3365 		} else {
3366 			dpll_mask =
3367 				BIT(DPLL_ID_DG1_DPLL0) |
3368 				BIT(DPLL_ID_DG1_DPLL1);
3369 		}
3370 	} else if (display->platform.rocketlake) {
3371 		dpll_mask =
3372 			BIT(DPLL_ID_EHL_DPLL4) |
3373 			BIT(DPLL_ID_ICL_DPLL1) |
3374 			BIT(DPLL_ID_ICL_DPLL0);
3375 	} else if ((display->platform.jasperlake ||
3376 		    display->platform.elkhartlake) &&
3377 		   port != PORT_A) {
3378 		dpll_mask =
3379 			BIT(DPLL_ID_EHL_DPLL4) |
3380 			BIT(DPLL_ID_ICL_DPLL1) |
3381 			BIT(DPLL_ID_ICL_DPLL0);
3382 	} else {
3383 		dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
3384 	}
3385 
3386 	/* Eliminate DPLLs from consideration if reserved by HTI */
3387 	dpll_mask &= ~intel_hti_dpll_mask(display);
3388 
3389 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3390 						&port_dpll->hw_state,
3391 						dpll_mask);
3392 	if (!port_dpll->pll)
3393 		return -EINVAL;
3394 
3395 	intel_reference_shared_dpll(state, crtc,
3396 				    port_dpll->pll, &port_dpll->hw_state);
3397 
3398 	icl_update_active_dpll(state, crtc, encoder);
3399 
3400 	return 0;
3401 }
3402 
3403 static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
3404 				    struct intel_crtc *crtc)
3405 {
3406 	struct intel_display *display = to_intel_display(state);
3407 	struct intel_crtc_state *crtc_state =
3408 		intel_atomic_get_new_crtc_state(state, crtc);
3409 	const struct intel_crtc_state *old_crtc_state =
3410 		intel_atomic_get_old_crtc_state(state, crtc);
3411 	struct icl_port_dpll *port_dpll =
3412 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3413 	struct skl_wrpll_params pll_params = {};
3414 	int ret;
3415 
3416 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3417 	ret = icl_calc_tbt_pll(crtc_state, &pll_params);
3418 	if (ret)
3419 		return ret;
3420 
3421 	icl_calc_dpll_state(display, &pll_params, &port_dpll->hw_state);
3422 
3423 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3424 	ret = icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state);
3425 	if (ret)
3426 		return ret;
3427 
3428 	/* this is mainly for the fastset check */
3429 	if (old_crtc_state->shared_dpll &&
3430 	    old_crtc_state->shared_dpll->info->id == DPLL_ID_ICL_TBTPLL)
3431 		icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
3432 	else
3433 		icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);
3434 
3435 	crtc_state->port_clock = icl_ddi_mg_pll_get_freq(display, NULL,
3436 							 &port_dpll->hw_state);
3437 
3438 	return 0;
3439 }
3440 
3441 static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
3442 				struct intel_crtc *crtc,
3443 				struct intel_encoder *encoder)
3444 {
3445 	struct intel_crtc_state *crtc_state =
3446 		intel_atomic_get_new_crtc_state(state, crtc);
3447 	struct icl_port_dpll *port_dpll =
3448 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3449 	enum intel_dpll_id dpll_id;
3450 	int ret;
3451 
3452 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3453 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3454 						&port_dpll->hw_state,
3455 						BIT(DPLL_ID_ICL_TBTPLL));
3456 	if (!port_dpll->pll)
3457 		return -EINVAL;
3458 	intel_reference_shared_dpll(state, crtc,
3459 				    port_dpll->pll, &port_dpll->hw_state);
3460 
3461 
3462 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3463 	dpll_id = icl_tc_port_to_pll_id(intel_encoder_to_tc(encoder));
3464 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3465 						&port_dpll->hw_state,
3466 						BIT(dpll_id));
3467 	if (!port_dpll->pll) {
3468 		ret = -EINVAL;
3469 		goto err_unreference_tbt_pll;
3470 	}
3471 	intel_reference_shared_dpll(state, crtc,
3472 				    port_dpll->pll, &port_dpll->hw_state);
3473 
3474 	icl_update_active_dpll(state, crtc, encoder);
3475 
3476 	return 0;
3477 
3478 err_unreference_tbt_pll:
3479 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3480 	intel_unreference_shared_dpll(state, crtc, port_dpll->pll);
3481 
3482 	return ret;
3483 }
3484 
3485 static int icl_compute_dplls(struct intel_atomic_state *state,
3486 			     struct intel_crtc *crtc,
3487 			     struct intel_encoder *encoder)
3488 {
3489 	if (intel_encoder_is_combo(encoder))
3490 		return icl_compute_combo_phy_dpll(state, crtc);
3491 	else if (intel_encoder_is_tc(encoder))
3492 		return icl_compute_tc_phy_dplls(state, crtc);
3493 
3494 	MISSING_CASE(encoder->port);
3495 
3496 	return 0;
3497 }
3498 
3499 static int icl_get_dplls(struct intel_atomic_state *state,
3500 			 struct intel_crtc *crtc,
3501 			 struct intel_encoder *encoder)
3502 {
3503 	if (intel_encoder_is_combo(encoder))
3504 		return icl_get_combo_phy_dpll(state, crtc, encoder);
3505 	else if (intel_encoder_is_tc(encoder))
3506 		return icl_get_tc_phy_dplls(state, crtc, encoder);
3507 
3508 	MISSING_CASE(encoder->port);
3509 
3510 	return -EINVAL;
3511 }
3512 
3513 static void icl_put_dplls(struct intel_atomic_state *state,
3514 			  struct intel_crtc *crtc)
3515 {
3516 	const struct intel_crtc_state *old_crtc_state =
3517 		intel_atomic_get_old_crtc_state(state, crtc);
3518 	struct intel_crtc_state *new_crtc_state =
3519 		intel_atomic_get_new_crtc_state(state, crtc);
3520 	enum icl_port_dpll_id id;
3521 
3522 	new_crtc_state->shared_dpll = NULL;
3523 
3524 	for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3525 		const struct icl_port_dpll *old_port_dpll =
3526 			&old_crtc_state->icl_port_dplls[id];
3527 		struct icl_port_dpll *new_port_dpll =
3528 			&new_crtc_state->icl_port_dplls[id];
3529 
3530 		new_port_dpll->pll = NULL;
3531 
3532 		if (!old_port_dpll->pll)
3533 			continue;
3534 
3535 		intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3536 	}
3537 }
3538 
/*
 * Read out the MG PHY PLL register state for @pll into @dpll_hw_state->icl.
 * Reserved bits are masked off so the result is comparable against the state
 * computed by icl_calc_mg_pll_state().
 *
 * Returns true if the PLL is enabled and its state was read, false otherwise.
 */
static bool mg_pll_get_hw_state(struct intel_display *display,
				struct intel_shared_dpll *pll,
				struct intel_dpll_hw_state *dpll_hw_state)
{
	struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	i915_reg_t enable_reg = intel_tc_pll_enable_reg(display, pll);

	/* The registers are only accessible with the display core powered. */
	wakeref = intel_display_power_get_if_enabled(display,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(display, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	hw_state->mg_refclkin_ctl = intel_de_read(display,
						  MG_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_de_read(display, MG_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_de_read(display, MG_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_pll_div0 = intel_de_read(display, MG_PLL_DIV0(tc_port));
	hw_state->mg_pll_div1 = intel_de_read(display, MG_PLL_DIV1(tc_port));
	hw_state->mg_pll_lf = intel_de_read(display, MG_PLL_LF(tc_port));
	hw_state->mg_pll_frac_lock = intel_de_read(display,
						   MG_PLL_FRAC_LOCK(tc_port));
	hw_state->mg_pll_ssc = intel_de_read(display, MG_PLL_SSC(tc_port));

	hw_state->mg_pll_bias = intel_de_read(display, MG_PLL_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias =
		intel_de_read(display, MG_PLL_TDC_COLDST_BIAS(tc_port));

	/*
	 * Mirror the mask selection done in icl_calc_mg_pll_state(): with a
	 * 38.4 MHz refclk only COLDSTART is significant in TDC_COLDST_BIAS
	 * and PLL_BIAS is treated as don't-care.
	 */
	if (display->dpll.ref_clks.nssc == 38400) {
		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
		hw_state->mg_pll_bias_mask = 0;
	} else {
		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
		hw_state->mg_pll_bias_mask = -1U;
	}

	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;

	ret = true;
out:
	intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3605 
/*
 * Read out the Dekel PHY PLL register state for @pll into
 * @dpll_hw_state->icl. Each register is masked down to the fields that
 * icl_calc_mg_pll_state() computes, so the states can be compared.
 *
 * Returns true if the PLL is enabled and its state was read, false otherwise.
 */
static bool dkl_pll_get_hw_state(struct intel_display *display,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *dpll_hw_state)
{
	struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	/* The registers are only accessible with the display core powered. */
	wakeref = intel_display_power_get_if_enabled(display,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(display, intel_tc_pll_enable_reg(display, pll));
	if (!(val & PLL_ENABLE))
		goto out;

	/*
	 * All registers read here have the same HIP_INDEX_REG even though
	 * they are on different building blocks
	 */
	hw_state->mg_refclkin_ctl = intel_dkl_phy_read(display,
						       DKL_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_dkl_phy_read(display, DKL_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_dkl_phy_read(display, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	/* AFC startup bits only matter when the VBT overrides them. */
	hw_state->mg_pll_div0 = intel_dkl_phy_read(display, DKL_PLL_DIV0(tc_port));
	val = DKL_PLL_DIV0_MASK;
	if (display->vbt.override_afc_startup)
		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
	hw_state->mg_pll_div0 &= val;

	hw_state->mg_pll_div1 = intel_dkl_phy_read(display, DKL_PLL_DIV1(tc_port));
	hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
				  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);

	hw_state->mg_pll_ssc = intel_dkl_phy_read(display, DKL_PLL_SSC(tc_port));
	hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
				 DKL_PLL_SSC_STEP_LEN_MASK |
				 DKL_PLL_SSC_STEP_NUM_MASK |
				 DKL_PLL_SSC_EN);

	hw_state->mg_pll_bias = intel_dkl_phy_read(display, DKL_PLL_BIAS(tc_port));
	hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK);

	hw_state->mg_pll_tdc_coldst_bias =
		intel_dkl_phy_read(display, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
					     DKL_PLL_TDC_FEED_FWD_GAIN_MASK);

	ret = true;
out:
	intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3677 
/*
 * Read out the combo/TBT PLL register state for @pll into
 * @dpll_hw_state->icl. The CFGCR register bank differs per platform, so
 * this walks the same platform ladder as icl_dpll_write().
 *
 * Returns true if the PLL is enabled and its state was read, false otherwise.
 */
static bool icl_pll_get_hw_state(struct intel_display *display,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *dpll_hw_state,
				 i915_reg_t enable_reg)
{
	struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	/* The registers are only accessible with the display core powered. */
	wakeref = intel_display_power_get_if_enabled(display,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(display, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	if (display->platform.alderlake_s) {
		hw_state->cfgcr0 = intel_de_read(display, ADLS_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(display, ADLS_DPLL_CFGCR1(id));
	} else if (display->platform.dg1) {
		hw_state->cfgcr0 = intel_de_read(display, DG1_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(display, DG1_DPLL_CFGCR1(id));
	} else if (display->platform.rocketlake) {
		hw_state->cfgcr0 = intel_de_read(display,
						 RKL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(display,
						 RKL_DPLL_CFGCR1(id));
	} else if (DISPLAY_VER(display) >= 12) {
		hw_state->cfgcr0 = intel_de_read(display,
						 TGL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(display,
						 TGL_DPLL_CFGCR1(id));
		/* DIV0 is only read out when the VBT overrides AFC startup. */
		if (display->vbt.override_afc_startup) {
			hw_state->div0 = intel_de_read(display, TGL_DPLL0_DIV0(id));
			hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK;
		}
	} else {
		/* JSL/EHL DPLL4 uses the register instance at index 4. */
		if ((display->platform.jasperlake || display->platform.elkhartlake) &&
		    id == DPLL_ID_EHL_DPLL4) {
			hw_state->cfgcr0 = intel_de_read(display,
							 ICL_DPLL_CFGCR0(4));
			hw_state->cfgcr1 = intel_de_read(display,
							 ICL_DPLL_CFGCR1(4));
		} else {
			hw_state->cfgcr0 = intel_de_read(display,
							 ICL_DPLL_CFGCR0(id));
			hw_state->cfgcr1 = intel_de_read(display,
							 ICL_DPLL_CFGCR1(id));
		}
	}

	ret = true;
out:
	intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3738 
3739 static bool combo_pll_get_hw_state(struct intel_display *display,
3740 				   struct intel_shared_dpll *pll,
3741 				   struct intel_dpll_hw_state *dpll_hw_state)
3742 {
3743 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(display, pll);
3744 
3745 	return icl_pll_get_hw_state(display, pll, dpll_hw_state, enable_reg);
3746 }
3747 
3748 static bool tbt_pll_get_hw_state(struct intel_display *display,
3749 				 struct intel_shared_dpll *pll,
3750 				 struct intel_dpll_hw_state *dpll_hw_state)
3751 {
3752 	return icl_pll_get_hw_state(display, pll, dpll_hw_state, TBT_PLL_ENABLE);
3753 }
3754 
/*
 * Program a combo/TBT PLL's CFGCR (and, on TGL+, DIV0) registers from the
 * precomputed @hw_state. The register bank depends on the platform and must
 * match the selection in icl_pll_get_hw_state().
 */
static void icl_dpll_write(struct intel_display *display,
			   struct intel_shared_dpll *pll,
			   const struct icl_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	i915_reg_t cfgcr0_reg, cfgcr1_reg, div0_reg = INVALID_MMIO_REG;

	if (display->platform.alderlake_s) {
		cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
		cfgcr1_reg = ADLS_DPLL_CFGCR1(id);
	} else if (display->platform.dg1) {
		cfgcr0_reg = DG1_DPLL_CFGCR0(id);
		cfgcr1_reg = DG1_DPLL_CFGCR1(id);
	} else if (display->platform.rocketlake) {
		cfgcr0_reg = RKL_DPLL_CFGCR0(id);
		cfgcr1_reg = RKL_DPLL_CFGCR1(id);
	} else if (DISPLAY_VER(display) >= 12) {
		cfgcr0_reg = TGL_DPLL_CFGCR0(id);
		cfgcr1_reg = TGL_DPLL_CFGCR1(id);
		div0_reg = TGL_DPLL0_DIV0(id);
	} else {
		/* JSL/EHL DPLL4 uses the register instance at index 4. */
		if ((display->platform.jasperlake || display->platform.elkhartlake) &&
		    id == DPLL_ID_EHL_DPLL4) {
			cfgcr0_reg = ICL_DPLL_CFGCR0(4);
			cfgcr1_reg = ICL_DPLL_CFGCR1(4);
		} else {
			cfgcr0_reg = ICL_DPLL_CFGCR0(id);
			cfgcr1_reg = ICL_DPLL_CFGCR1(id);
		}
	}

	intel_de_write(display, cfgcr0_reg, hw_state->cfgcr0);
	intel_de_write(display, cfgcr1_reg, hw_state->cfgcr1);
	/* An AFC startup override without a DIV0 register would be a bug. */
	drm_WARN_ON_ONCE(display->drm, display->vbt.override_afc_startup &&
			 !i915_mmio_reg_valid(div0_reg));
	if (display->vbt.override_afc_startup &&
	    i915_mmio_reg_valid(div0_reg))
		intel_de_rmw(display, div0_reg,
			     TGL_DPLL0_DIV0_AFC_STARTUP_MASK, hw_state->div0);
	intel_de_posting_read(display, cfgcr1_reg);
}
3796 
/*
 * Program the MG PHY PLL registers for @pll from the precomputed @hw_state,
 * finishing with a posting read.
 */
static void icl_mg_pll_write(struct intel_display *display,
			     struct intel_shared_dpll *pll,
			     const struct icl_dpll_hw_state *hw_state)
{
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);

	/*
	 * Some of the following registers have reserved fields, so program
	 * these with RMW based on a mask. The mask can be fixed or generated
	 * during the calc/readout phase if the mask depends on some other HW
	 * state like refclk, see icl_calc_mg_pll_state().
	 */
	intel_de_rmw(display, MG_REFCLKIN_CTL(tc_port),
		     MG_REFCLKIN_CTL_OD_2_MUX_MASK, hw_state->mg_refclkin_ctl);

	intel_de_rmw(display, MG_CLKTOP2_CORECLKCTL1(tc_port),
		     MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK,
		     hw_state->mg_clktop2_coreclkctl1);

	intel_de_rmw(display, MG_CLKTOP2_HSCLKCTL(tc_port),
		     MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		     MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		     MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		     MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK,
		     hw_state->mg_clktop2_hsclkctl);

	intel_de_write(display, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
	intel_de_write(display, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
	intel_de_write(display, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
	intel_de_write(display, MG_PLL_FRAC_LOCK(tc_port),
		       hw_state->mg_pll_frac_lock);
	intel_de_write(display, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);

	/* The BIAS/TDC masks come from icl_calc_mg_pll_state()/readout. */
	intel_de_rmw(display, MG_PLL_BIAS(tc_port),
		     hw_state->mg_pll_bias_mask, hw_state->mg_pll_bias);

	intel_de_rmw(display, MG_PLL_TDC_COLDST_BIAS(tc_port),
		     hw_state->mg_pll_tdc_coldst_bias_mask,
		     hw_state->mg_pll_tdc_coldst_bias);

	intel_de_posting_read(display, MG_PLL_TDC_COLDST_BIAS(tc_port));
}
3839 
/*
 * Program the Dekel PHY PLL registers for @pll from the precomputed
 * @hw_state. Every register is updated read-modify-write to preserve the
 * fields not covered by the computed state, finishing with a posting read.
 */
static void dkl_pll_write(struct intel_display *display,
			  struct intel_shared_dpll *pll,
			  const struct icl_dpll_hw_state *hw_state)
{
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
	u32 val;

	/*
	 * All registers programmed here have the same HIP_INDEX_REG even
	 * though on different building block
	 */
	/* All the registers are RMW */
	val = intel_dkl_phy_read(display, DKL_REFCLKIN_CTL(tc_port));
	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
	val |= hw_state->mg_refclkin_ctl;
	intel_dkl_phy_write(display, DKL_REFCLKIN_CTL(tc_port), val);

	val = intel_dkl_phy_read(display, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
	val |= hw_state->mg_clktop2_coreclkctl1;
	intel_dkl_phy_write(display, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);

	val = intel_dkl_phy_read(display, DKL_CLKTOP2_HSCLKCTL(tc_port));
	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
	val |= hw_state->mg_clktop2_hsclkctl;
	intel_dkl_phy_write(display, DKL_CLKTOP2_HSCLKCTL(tc_port), val);

	/* The AFC startup field is only touched when the VBT overrides it. */
	val = DKL_PLL_DIV0_MASK;
	if (display->vbt.override_afc_startup)
		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
	intel_dkl_phy_rmw(display, DKL_PLL_DIV0(tc_port), val,
			  hw_state->mg_pll_div0);

	val = intel_dkl_phy_read(display, DKL_PLL_DIV1(tc_port));
	val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
		 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
	val |= hw_state->mg_pll_div1;
	intel_dkl_phy_write(display, DKL_PLL_DIV1(tc_port), val);

	val = intel_dkl_phy_read(display, DKL_PLL_SSC(tc_port));
	val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
		 DKL_PLL_SSC_STEP_LEN_MASK |
		 DKL_PLL_SSC_STEP_NUM_MASK |
		 DKL_PLL_SSC_EN);
	val |= hw_state->mg_pll_ssc;
	intel_dkl_phy_write(display, DKL_PLL_SSC(tc_port), val);

	val = intel_dkl_phy_read(display, DKL_PLL_BIAS(tc_port));
	val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
		 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
	val |= hw_state->mg_pll_bias;
	intel_dkl_phy_write(display, DKL_PLL_BIAS(tc_port), val);

	val = intel_dkl_phy_read(display, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
		 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
	val |= hw_state->mg_pll_tdc_coldst_bias;
	intel_dkl_phy_write(display, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);

	intel_dkl_phy_posting_read(display, DKL_PLL_TDC_COLDST_BIAS(tc_port));
}
3904 
/*
 * Power up the PLL controlled by @enable_reg and wait for its power state
 * bit to be set; log an error on timeout.
 */
static void icl_pll_power_enable(struct intel_display *display,
				 struct intel_shared_dpll *pll,
				 i915_reg_t enable_reg)
{
	intel_de_rmw(display, enable_reg, 0, PLL_POWER_ENABLE);

	/*
	 * The spec says we need to "wait" but it also says it should be
	 * immediate.
	 */
	if (intel_de_wait_for_set(display, enable_reg, PLL_POWER_STATE, 1))
		drm_err(display->drm, "PLL %d Power not enabled\n",
			pll->info->id);
}
3919 
/*
 * Enable the PLL controlled by @enable_reg and wait for it to report lock;
 * log an error on timeout.
 */
static void icl_pll_enable(struct intel_display *display,
			   struct intel_shared_dpll *pll,
			   i915_reg_t enable_reg)
{
	intel_de_rmw(display, enable_reg, 0, PLL_ENABLE);

	/* Timeout is actually 600us. */
	if (intel_de_wait_for_set(display, enable_reg, PLL_LOCK, 1))
		drm_err(display->drm, "PLL %d not locked\n", pll->info->id);
}
3930 
/*
 * Wa_16011069516: on ADL-P display stepping A0, disable DPT clock gating
 * in TRANS_CMTG_CHICKEN whenever DPLL0 is (re-)enabled. No-op on all
 * other platforms/steppings and PLLs.
 */
static void adlp_cmtg_clock_gating_wa(struct intel_display *display, struct intel_shared_dpll *pll)
{
	u32 val;

	if (!(display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) ||
	    pll->info->id != DPLL_ID_ICL_DPLL0)
		return;
	/*
	 * Wa_16011069516:adl-p[a0]
	 *
	 * All CMTG regs are unreliable until CMTG clock gating is disabled,
	 * so we can only assume the default TRANS_CMTG_CHICKEN reg value and
	 * sanity check this assumption with a double read, which presumably
	 * returns the correct value even with clock gating on.
	 *
	 * Instead of the usual place for workarounds we apply this one here,
	 * since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled.
	 */
	/* First half of the double read; the value itself is discarded. */
	val = intel_de_read(display, TRANS_CMTG_CHICKEN);
	/*
	 * NOTE(review): the sanity check below relies on intel_de_rmw()
	 * returning the value read *before* the modify — confirm against
	 * the intel_de_rmw() implementation.
	 */
	val = intel_de_rmw(display, TRANS_CMTG_CHICKEN, ~0, DISABLE_DPT_CLK_GATING);
	if (drm_WARN_ON(display->drm, val & ~DISABLE_DPT_CLK_GATING))
		drm_dbg_kms(display->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val);
}
3954 
/*
 * Enable a combo PHY PLL: power it up, program its configuration, enable
 * it and apply the ADL-P CMTG clock gating workaround.
 */
static void combo_pll_enable(struct intel_display *display,
			     struct intel_shared_dpll *pll,
			     const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
	i915_reg_t enable_reg = intel_combo_pll_enable_reg(display, pll);

	icl_pll_power_enable(display, pll, enable_reg);

	icl_dpll_write(display, pll, hw_state);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(display, pll, enable_reg);

	adlp_cmtg_clock_gating_wa(display, pll);

	/* DVFS post sequence would be here. See the comment above. */
}
3978 
/*
 * Enable the TBT PLL; it is controlled via the fixed TBT_PLL_ENABLE
 * register rather than a per-PLL enable register.
 */
static void tbt_pll_enable(struct intel_display *display,
			   struct intel_shared_dpll *pll,
			   const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;

	icl_pll_power_enable(display, pll, TBT_PLL_ENABLE);

	icl_dpll_write(display, pll, hw_state);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(display, pll, TBT_PLL_ENABLE);

	/* DVFS post sequence would be here. See the comment above. */
}
3999 
/*
 * Enable a Type-C PHY PLL: Dekel register layout on display ver 12+,
 * MG PHY layout on ICL.
 */
static void mg_pll_enable(struct intel_display *display,
			  struct intel_shared_dpll *pll,
			  const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
	i915_reg_t enable_reg = intel_tc_pll_enable_reg(display, pll);

	icl_pll_power_enable(display, pll, enable_reg);

	if (DISPLAY_VER(display) >= 12)
		dkl_pll_write(display, pll, hw_state);
	else
		icl_mg_pll_write(display, pll, hw_state);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(display, pll, enable_reg);

	/* DVFS post sequence would be here. See the comment above. */
}
4024 
/*
 * Common ICL+ PLL disable sequence: clear PLL_ENABLE and wait for the lock
 * to drop, then remove PLL power and wait for the power state to clear.
 */
static void icl_pll_disable(struct intel_display *display,
			    struct intel_shared_dpll *pll,
			    i915_reg_t enable_reg)
{
	/* The first steps are done by intel_ddi_post_disable(). */

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	intel_de_rmw(display, enable_reg, PLL_ENABLE, 0);

	/* Timeout is actually 1us. */
	if (intel_de_wait_for_clear(display, enable_reg, PLL_LOCK, 1))
		drm_err(display->drm, "PLL %d locked\n", pll->info->id);

	/* DVFS post sequence would be here. See the comment above. */

	intel_de_rmw(display, enable_reg, PLL_POWER_ENABLE, 0);

	/*
	 * The spec says we need to "wait" but it also says it should be
	 * immediate.
	 */
	if (intel_de_wait_for_clear(display, enable_reg, PLL_POWER_STATE, 1))
		drm_err(display->drm, "PLL %d Power not disabled\n",
			pll->info->id);
}
4055 
/* Disable and power down a combo PHY PLL via its per-PLL enable register. */
static void combo_pll_disable(struct intel_display *display,
			      struct intel_shared_dpll *pll)
{
	icl_pll_disable(display, pll,
			intel_combo_pll_enable_reg(display, pll));
}
4063 
/* Disable the TBT PLL; it uses the fixed TBT_PLL_ENABLE register. */
static void tbt_pll_disable(struct intel_display *display,
			    struct intel_shared_dpll *pll)
{
	icl_pll_disable(display, pll, TBT_PLL_ENABLE);
}
4069 
/* Disable and power down a Type-C (MG/Dekel) PHY PLL. */
static void mg_pll_disable(struct intel_display *display,
			   struct intel_shared_dpll *pll)
{
	icl_pll_disable(display, pll,
			intel_tc_pll_enable_reg(display, pll));
}
4077 
/* The ICL+ non-SSC DPLL reference tracks the cdclk reference clock. */
static void icl_update_dpll_ref_clks(struct intel_display *display)
{
	/* No SSC ref */
	display->dpll.ref_clks.nssc = display->cdclk.hw.ref;
}
4083 
4084 static void icl_dump_hw_state(struct drm_printer *p,
4085 			      const struct intel_dpll_hw_state *dpll_hw_state)
4086 {
4087 	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
4088 
4089 	drm_printf(p, "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, "
4090 		   "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
4091 		   "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
4092 		   "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
4093 		   "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
4094 		   "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
4095 		   hw_state->cfgcr0, hw_state->cfgcr1, hw_state->div0,
4096 		   hw_state->mg_refclkin_ctl,
4097 		   hw_state->mg_clktop2_coreclkctl1,
4098 		   hw_state->mg_clktop2_hsclkctl,
4099 		   hw_state->mg_pll_div0,
4100 		   hw_state->mg_pll_div1,
4101 		   hw_state->mg_pll_lf,
4102 		   hw_state->mg_pll_frac_lock,
4103 		   hw_state->mg_pll_ssc,
4104 		   hw_state->mg_pll_bias,
4105 		   hw_state->mg_pll_tdc_coldst_bias);
4106 }
4107 
4108 static bool icl_compare_hw_state(const struct intel_dpll_hw_state *_a,
4109 				 const struct intel_dpll_hw_state *_b)
4110 {
4111 	const struct icl_dpll_hw_state *a = &_a->icl;
4112 	const struct icl_dpll_hw_state *b = &_b->icl;
4113 
4114 	/* FIXME split combo vs. mg more thoroughly */
4115 	return a->cfgcr0 == b->cfgcr0 &&
4116 		a->cfgcr1 == b->cfgcr1 &&
4117 		a->div0 == b->div0 &&
4118 		a->mg_refclkin_ctl == b->mg_refclkin_ctl &&
4119 		a->mg_clktop2_coreclkctl1 == b->mg_clktop2_coreclkctl1 &&
4120 		a->mg_clktop2_hsclkctl == b->mg_clktop2_hsclkctl &&
4121 		a->mg_pll_div0 == b->mg_pll_div0 &&
4122 		a->mg_pll_div1 == b->mg_pll_div1 &&
4123 		a->mg_pll_lf == b->mg_pll_lf &&
4124 		a->mg_pll_frac_lock == b->mg_pll_frac_lock &&
4125 		a->mg_pll_ssc == b->mg_pll_ssc &&
4126 		a->mg_pll_bias == b->mg_pll_bias &&
4127 		a->mg_pll_tdc_coldst_bias == b->mg_pll_tdc_coldst_bias;
4128 }
4129 
/* Hooks for the combo PHY PLLs. */
static const struct intel_shared_dpll_funcs combo_pll_funcs = {
	.enable = combo_pll_enable,
	.disable = combo_pll_disable,
	.get_hw_state = combo_pll_get_hw_state,
	.get_freq = icl_ddi_combo_pll_get_freq,
};

/* Hooks for the TBT PLL. */
static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
	.enable = tbt_pll_enable,
	.disable = tbt_pll_disable,
	.get_hw_state = tbt_pll_get_hw_state,
	.get_freq = icl_ddi_tbt_pll_get_freq,
};

/* Hooks for the ICL MG PHY (Type-C) PLLs. */
static const struct intel_shared_dpll_funcs mg_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = mg_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};
4150 
/* ICL: two combo PLLs, the TBT PLL and four MG PHY (Type-C) PLLs. */
static const struct dpll_info icl_plls[] = {
	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
	{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
	  .is_alt_port_dpll = true, },
	{ .name = "MG PLL 1", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
	{ .name = "MG PLL 2", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
	{ .name = "MG PLL 3", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
	{ .name = "MG PLL 4", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
	{}
};

/* Shared DPLL manager for ICL. */
static const struct intel_dpll_mgr icl_pll_mgr = {
	.dpll_info = icl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
	.compare_hw_state = icl_compare_hw_state,
};
4173 
/* EHL/JSL: combo PLLs only; DPLL4 requires the DC_OFF power domain. */
static const struct dpll_info ehl_plls[] = {
	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
	{ .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4,
	  .power_domain = POWER_DOMAIN_DC_OFF, },
	{}
};

/* Shared DPLL manager for EHL/JSL. */
static const struct intel_dpll_mgr ehl_pll_mgr = {
	.dpll_info = ehl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
	.compare_hw_state = icl_compare_hw_state,
};
4191 
/*
 * Hooks for the TGL+ Dekel PHY (Type-C) PLLs; reuses the MG enable/disable
 * paths, which dispatch on the display version internally.
 */
static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = dkl_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};
4198 
/* TGL: two combo PLLs, the TBT PLL and six Dekel (Type-C) PLLs. */
static const struct dpll_info tgl_plls[] = {
	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
	{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
	  .is_alt_port_dpll = true, },
	{ .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
	{ .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
	{ .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
	{ .name = "TC PLL 4", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
	{ .name = "TC PLL 5", .funcs = &dkl_pll_funcs, .id = DPLL_ID_TGL_MGPLL5, },
	{ .name = "TC PLL 6", .funcs = &dkl_pll_funcs, .id = DPLL_ID_TGL_MGPLL6, },
	{}
};

/* Shared DPLL manager for TGL. */
static const struct intel_dpll_mgr tgl_pll_mgr = {
	.dpll_info = tgl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
	.compare_hw_state = icl_compare_hw_state,
};
4223 
/* RKL: combo PLLs only. */
static const struct dpll_info rkl_plls[] = {
	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
	{ .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4, },
	{}
};

/* Shared DPLL manager for RKL. */
static const struct intel_dpll_mgr rkl_pll_mgr = {
	.dpll_info = rkl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
	.compare_hw_state = icl_compare_hw_state,
};
4240 
/* DG1: four combo PLLs. */
static const struct dpll_info dg1_plls[] = {
	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL0, },
	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL1, },
	{ .name = "DPLL 2", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL2, },
	{ .name = "DPLL 3", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL3, },
	{}
};

/* Shared DPLL manager for DG1. */
static const struct intel_dpll_mgr dg1_pll_mgr = {
	.dpll_info = dg1_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
	.compare_hw_state = icl_compare_hw_state,
};
4258 
/* ADL-S: four combo PLLs. */
static const struct dpll_info adls_plls[] = {
	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
	{ .name = "DPLL 2", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL2, },
	{ .name = "DPLL 3", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL3, },
	{}
};

/* Shared DPLL manager for ADL-S. */
static const struct intel_dpll_mgr adls_pll_mgr = {
	.dpll_info = adls_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
	.compare_hw_state = icl_compare_hw_state,
};
4276 
/* ADL-P: two combo PLLs, the TBT PLL and four Dekel (Type-C) PLLs. */
static const struct dpll_info adlp_plls[] = {
	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
	{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
	  .is_alt_port_dpll = true, },
	{ .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
	{ .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
	{ .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
	{ .name = "TC PLL 4", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
	{}
};

/* Shared DPLL manager for ADL-P. */
static const struct intel_dpll_mgr adlp_pll_mgr = {
	.dpll_info = adlp_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
	.compare_hw_state = icl_compare_hw_state,
};
4299 
4300 /**
4301  * intel_shared_dpll_init - Initialize shared DPLLs
4302  * @display: intel_display device
4303  *
4304  * Initialize shared DPLLs for @display.
4305  */
4306 void intel_shared_dpll_init(struct intel_display *display)
4307 {
4308 	struct drm_i915_private *i915 = to_i915(display->drm);
4309 	const struct intel_dpll_mgr *dpll_mgr = NULL;
4310 	const struct dpll_info *dpll_info;
4311 	int i;
4312 
4313 	mutex_init(&display->dpll.lock);
4314 
4315 	if (DISPLAY_VER(display) >= 14 || display->platform.dg2)
4316 		/* No shared DPLLs on DG2; port PLLs are part of the PHY */
4317 		dpll_mgr = NULL;
4318 	else if (display->platform.alderlake_p)
4319 		dpll_mgr = &adlp_pll_mgr;
4320 	else if (display->platform.alderlake_s)
4321 		dpll_mgr = &adls_pll_mgr;
4322 	else if (display->platform.dg1)
4323 		dpll_mgr = &dg1_pll_mgr;
4324 	else if (display->platform.rocketlake)
4325 		dpll_mgr = &rkl_pll_mgr;
4326 	else if (DISPLAY_VER(display) >= 12)
4327 		dpll_mgr = &tgl_pll_mgr;
4328 	else if (display->platform.jasperlake || display->platform.elkhartlake)
4329 		dpll_mgr = &ehl_pll_mgr;
4330 	else if (DISPLAY_VER(display) >= 11)
4331 		dpll_mgr = &icl_pll_mgr;
4332 	else if (display->platform.geminilake || display->platform.broxton)
4333 		dpll_mgr = &bxt_pll_mgr;
4334 	else if (DISPLAY_VER(display) == 9)
4335 		dpll_mgr = &skl_pll_mgr;
4336 	else if (HAS_DDI(display))
4337 		dpll_mgr = &hsw_pll_mgr;
4338 	else if (HAS_PCH_IBX(i915) || HAS_PCH_CPT(i915))
4339 		dpll_mgr = &pch_pll_mgr;
4340 
4341 	if (!dpll_mgr)
4342 		return;
4343 
4344 	dpll_info = dpll_mgr->dpll_info;
4345 
4346 	for (i = 0; dpll_info[i].name; i++) {
4347 		if (drm_WARN_ON(display->drm,
4348 				i >= ARRAY_SIZE(display->dpll.shared_dplls)))
4349 			break;
4350 
4351 		/* must fit into unsigned long bitmask on 32bit */
4352 		if (drm_WARN_ON(display->drm, dpll_info[i].id >= 32))
4353 			break;
4354 
4355 		display->dpll.shared_dplls[i].info = &dpll_info[i];
4356 		display->dpll.shared_dplls[i].index = i;
4357 	}
4358 
4359 	display->dpll.mgr = dpll_mgr;
4360 	display->dpll.num_shared_dpll = i;
4361 }
4362 
4363 /**
4364  * intel_compute_shared_dplls - compute DPLL state CRTC and encoder combination
4365  * @state: atomic state
4366  * @crtc: CRTC to compute DPLLs for
4367  * @encoder: encoder
4368  *
4369  * This function computes the DPLL state for the given CRTC and encoder.
4370  *
4371  * The new configuration in the atomic commit @state is made effective by
4372  * calling intel_shared_dpll_swap_state().
4373  *
4374  * Returns:
4375  * 0 on success, negative error code on failure.
4376  */
4377 int intel_compute_shared_dplls(struct intel_atomic_state *state,
4378 			       struct intel_crtc *crtc,
4379 			       struct intel_encoder *encoder)
4380 {
4381 	struct intel_display *display = to_intel_display(state);
4382 	const struct intel_dpll_mgr *dpll_mgr = display->dpll.mgr;
4383 
4384 	if (drm_WARN_ON(display->drm, !dpll_mgr))
4385 		return -EINVAL;
4386 
4387 	return dpll_mgr->compute_dplls(state, crtc, encoder);
4388 }
4389 
4390 /**
4391  * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
4392  * @state: atomic state
4393  * @crtc: CRTC to reserve DPLLs for
4394  * @encoder: encoder
4395  *
4396  * This function reserves all required DPLLs for the given CRTC and encoder
4397  * combination in the current atomic commit @state and the new @crtc atomic
4398  * state.
4399  *
4400  * The new configuration in the atomic commit @state is made effective by
4401  * calling intel_shared_dpll_swap_state().
4402  *
4403  * The reserved DPLLs should be released by calling
4404  * intel_release_shared_dplls().
4405  *
4406  * Returns:
4407  * 0 if all required DPLLs were successfully reserved,
4408  * negative error code otherwise.
4409  */
4410 int intel_reserve_shared_dplls(struct intel_atomic_state *state,
4411 			       struct intel_crtc *crtc,
4412 			       struct intel_encoder *encoder)
4413 {
4414 	struct intel_display *display = to_intel_display(state);
4415 	const struct intel_dpll_mgr *dpll_mgr = display->dpll.mgr;
4416 
4417 	if (drm_WARN_ON(display->drm, !dpll_mgr))
4418 		return -EINVAL;
4419 
4420 	return dpll_mgr->get_dplls(state, crtc, encoder);
4421 }
4422 
4423 /**
4424  * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
4425  * @state: atomic state
4426  * @crtc: crtc from which the DPLLs are to be released
4427  *
4428  * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
4429  * from the current atomic commit @state and the old @crtc atomic state.
4430  *
4431  * The new configuration in the atomic commit @state is made effective by
4432  * calling intel_shared_dpll_swap_state().
4433  */
4434 void intel_release_shared_dplls(struct intel_atomic_state *state,
4435 				struct intel_crtc *crtc)
4436 {
4437 	struct intel_display *display = to_intel_display(state);
4438 	const struct intel_dpll_mgr *dpll_mgr = display->dpll.mgr;
4439 
4440 	/*
4441 	 * FIXME: this function is called for every platform having a
4442 	 * compute_clock hook, even though the platform doesn't yet support
4443 	 * the shared DPLL framework and intel_reserve_shared_dplls() is not
4444 	 * called on those.
4445 	 */
4446 	if (!dpll_mgr)
4447 		return;
4448 
4449 	dpll_mgr->put_dplls(state, crtc);
4450 }
4451 
4452 /**
4453  * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
4454  * @state: atomic state
4455  * @crtc: the CRTC for which to update the active DPLL
4456  * @encoder: encoder determining the type of port DPLL
4457  *
4458  * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
4459  * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
4460  * DPLL selected will be based on the current mode of the encoder's port.
4461  */
4462 void intel_update_active_dpll(struct intel_atomic_state *state,
4463 			      struct intel_crtc *crtc,
4464 			      struct intel_encoder *encoder)
4465 {
4466 	struct intel_display *display = to_intel_display(encoder);
4467 	const struct intel_dpll_mgr *dpll_mgr = display->dpll.mgr;
4468 
4469 	if (drm_WARN_ON(display->drm, !dpll_mgr))
4470 		return;
4471 
4472 	dpll_mgr->update_active_dpll(state, crtc, encoder);
4473 }
4474 
4475 /**
4476  * intel_dpll_get_freq - calculate the DPLL's output frequency
4477  * @display: intel_display device
4478  * @pll: DPLL for which to calculate the output frequency
4479  * @dpll_hw_state: DPLL state from which to calculate the output frequency
4480  *
4481  * Return the output frequency corresponding to @pll's passed in @dpll_hw_state.
4482  */
4483 int intel_dpll_get_freq(struct intel_display *display,
4484 			const struct intel_shared_dpll *pll,
4485 			const struct intel_dpll_hw_state *dpll_hw_state)
4486 {
4487 	if (drm_WARN_ON(display->drm, !pll->info->funcs->get_freq))
4488 		return 0;
4489 
4490 	return pll->info->funcs->get_freq(display, pll, dpll_hw_state);
4491 }
4492 
4493 /**
4494  * intel_dpll_get_hw_state - readout the DPLL's hardware state
4495  * @display: intel_display device instance
4496  * @pll: DPLL for which to calculate the output frequency
4497  * @dpll_hw_state: DPLL's hardware state
4498  *
4499  * Read out @pll's hardware state into @dpll_hw_state.
4500  */
4501 bool intel_dpll_get_hw_state(struct intel_display *display,
4502 			     struct intel_shared_dpll *pll,
4503 			     struct intel_dpll_hw_state *dpll_hw_state)
4504 {
4505 	return pll->info->funcs->get_hw_state(display, pll, dpll_hw_state);
4506 }
4507 
/*
 * Read out the current hardware state of @pll into its SW tracking and
 * rebuild the pipe reference/active masks from the active CRTCs using it.
 */
static void readout_dpll_hw_state(struct intel_display *display,
				  struct intel_shared_dpll *pll)
{
	struct intel_crtc *crtc;

	pll->on = intel_dpll_get_hw_state(display, pll, &pll->state.hw_state);

	/* An enabled PLL with a power domain requirement holds a reference. */
	if (pll->on && pll->info->power_domain)
		pll->wakeref = intel_display_power_get(display, pll->info->power_domain);

	/* Rebuild the reference mask from the active CRTCs using this PLL. */
	pll->state.pipe_mask = 0;
	for_each_intel_crtc(display->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
			intel_reference_shared_dpll_crtc(crtc, pll, &pll->state);
	}
	pll->active_mask = pll->state.pipe_mask;

	drm_dbg_kms(display->drm,
		    "%s hw state readout: pipe_mask 0x%x, on %i\n",
		    pll->info->name, pll->state.pipe_mask, pll->on);
}
4532 
4533 void intel_dpll_update_ref_clks(struct intel_display *display)
4534 {
4535 	if (display->dpll.mgr && display->dpll.mgr->update_ref_clks)
4536 		display->dpll.mgr->update_ref_clks(display);
4537 }
4538 
/* Read out the hardware state of all shared DPLLs into SW tracking. */
void intel_dpll_readout_hw_state(struct intel_display *display)
{
	struct intel_shared_dpll *pll;
	int i;

	for_each_shared_dpll(display, pll, i)
		readout_dpll_hw_state(display, pll);
}
4547 
/*
 * Sanitize the state of @pll after readout: reapply the ADL-P CMTG
 * workaround while the PLL is still enabled, and disable the PLL if no
 * pipe is actively using it.
 */
static void sanitize_dpll_state(struct intel_display *display,
				struct intel_shared_dpll *pll)
{
	if (!pll->on)
		return;

	/* Must be applied before a possible disable below (needs DPLL0 on). */
	adlp_cmtg_clock_gating_wa(display, pll);

	if (pll->active_mask)
		return;

	drm_dbg_kms(display->drm,
		    "%s enabled but not in use, disabling\n",
		    pll->info->name);

	_intel_disable_shared_dpll(display, pll);
}
4565 
/* Sanitize the state of all shared DPLLs after hardware readout. */
void intel_dpll_sanitize_state(struct intel_display *display)
{
	struct intel_shared_dpll *pll;
	int i;

	/* C10/C20 PHY power save workaround, applied before PLL sanitizing. */
	intel_cx0_pll_power_save_wa(display);

	for_each_shared_dpll(display, pll, i)
		sanitize_dpll_state(display, pll);
}
4576 
4577 /**
4578  * intel_dpll_dump_hw_state - dump hw_state
4579  * @display: intel_display structure
4580  * @p: where to print the state to
4581  * @dpll_hw_state: hw state to be dumped
4582  *
4583  * Dumo out the relevant values in @dpll_hw_state.
4584  */
4585 void intel_dpll_dump_hw_state(struct intel_display *display,
4586 			      struct drm_printer *p,
4587 			      const struct intel_dpll_hw_state *dpll_hw_state)
4588 {
4589 	if (display->dpll.mgr) {
4590 		display->dpll.mgr->dump_hw_state(p, dpll_hw_state);
4591 	} else {
4592 		/* fallback for platforms that don't use the shared dpll
4593 		 * infrastructure
4594 		 */
4595 		ibx_dump_hw_state(p, dpll_hw_state);
4596 	}
4597 }
4598 
4599 /**
4600  * intel_dpll_compare_hw_state - compare the two states
4601  * @display: intel_display structure
4602  * @a: first DPLL hw state
4603  * @b: second DPLL hw state
4604  *
4605  * Compare DPLL hw states @a and @b.
4606  *
4607  * Returns: true if the states are equal, false if the differ
4608  */
4609 bool intel_dpll_compare_hw_state(struct intel_display *display,
4610 				 const struct intel_dpll_hw_state *a,
4611 				 const struct intel_dpll_hw_state *b)
4612 {
4613 	if (display->dpll.mgr) {
4614 		return display->dpll.mgr->compare_hw_state(a, b);
4615 	} else {
4616 		/* fallback for platforms that don't use the shared dpll
4617 		 * infrastructure
4618 		 */
4619 		return ibx_compare_hw_state(a, b);
4620 	}
4621 }
4622 
/*
 * Cross-check the SW tracking of @pll against a fresh hardware readout.
 * With @crtc == NULL only the global active/reference mask consistency is
 * checked; otherwise @crtc's membership in the PLL's masks is verified
 * against @new_crtc_state as well.
 */
static void
verify_single_dpll_state(struct intel_display *display,
			 struct intel_shared_dpll *pll,
			 struct intel_crtc *crtc,
			 const struct intel_crtc_state *new_crtc_state)
{
	struct intel_dpll_hw_state dpll_hw_state = {};
	u8 pipe_mask;
	bool active;

	active = intel_dpll_get_hw_state(display, pll, &dpll_hw_state);

	/* Always-on PLLs are exempt from the on/active cross-checks. */
	if (!pll->info->always_on) {
		INTEL_DISPLAY_STATE_WARN(display, !pll->on && pll->active_mask,
					 "%s: pll in active use but not on in sw tracking\n",
					 pll->info->name);
		INTEL_DISPLAY_STATE_WARN(display, pll->on && !pll->active_mask,
					 "%s: pll is on but not used by any active pipe\n",
					 pll->info->name);
		INTEL_DISPLAY_STATE_WARN(display, pll->on != active,
					 "%s: pll on state mismatch (expected %i, found %i)\n",
					 pll->info->name, pll->on, active);
	}

	if (!crtc) {
		INTEL_DISPLAY_STATE_WARN(display,
					 pll->active_mask & ~pll->state.pipe_mask,
					 "%s: more active pll users than references: 0x%x vs 0x%x\n",
					 pll->info->name, pll->active_mask, pll->state.pipe_mask);

		return;
	}

	pipe_mask = BIT(crtc->pipe);

	/* The pipe must be in the active mask iff its new state is active. */
	if (new_crtc_state->hw.active)
		INTEL_DISPLAY_STATE_WARN(display, !(pll->active_mask & pipe_mask),
					 "%s: pll active mismatch (expected pipe %c in active mask 0x%x)\n",
					 pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
	else
		INTEL_DISPLAY_STATE_WARN(display, pll->active_mask & pipe_mask,
					 "%s: pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
					 pll->info->name, pipe_name(crtc->pipe), pll->active_mask);

	INTEL_DISPLAY_STATE_WARN(display, !(pll->state.pipe_mask & pipe_mask),
				 "%s: pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
				 pll->info->name, pipe_mask, pll->state.pipe_mask);

	/* The tracked hw state must match what we just read back. */
	INTEL_DISPLAY_STATE_WARN(display,
				 pll->on && memcmp(&pll->state.hw_state, &dpll_hw_state,
						   sizeof(dpll_hw_state)),
				 "%s: pll hw state mismatch\n",
				 pll->info->name);
}
4677 
4678 static bool has_alt_port_dpll(const struct intel_shared_dpll *old_pll,
4679 			      const struct intel_shared_dpll *new_pll)
4680 {
4681 	return old_pll && new_pll && old_pll != new_pll &&
4682 		(old_pll->info->is_alt_port_dpll || new_pll->info->is_alt_port_dpll);
4683 }
4684 
/*
 * Verify @crtc's DPLL state after a commit: the new PLL (if any) must
 * track this pipe, and a PLL the pipe switched away from must no longer
 * list it as active.
 */
void intel_shared_dpll_state_verify(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (new_crtc_state->shared_dpll)
		verify_single_dpll_state(display, new_crtc_state->shared_dpll,
					 crtc, new_crtc_state);

	if (old_crtc_state->shared_dpll &&
	    old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
		u8 pipe_mask = BIT(crtc->pipe);
		struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;

		INTEL_DISPLAY_STATE_WARN(display, pll->active_mask & pipe_mask,
					 "%s: pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
					 pll->info->name, pipe_name(crtc->pipe), pll->active_mask);

		/* TC ports have both MG/TC and TBT PLL referenced simultaneously */
		INTEL_DISPLAY_STATE_WARN(display, !has_alt_port_dpll(old_crtc_state->shared_dpll,
								     new_crtc_state->shared_dpll) &&
					 pll->state.pipe_mask & pipe_mask,
					 "%s: pll enabled crtcs mismatch (found pipe %c in enabled mask (0x%x))\n",
					 pll->info->name, pipe_name(crtc->pipe), pll->state.pipe_mask);
	}
}
4715 
/*
 * Run the per-PLL consistency checks on all shared DPLLs with no CRTC
 * association (global active/reference mask checks only).
 */
void intel_shared_dpll_verify_disabled(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_shared_dpll *pll;
	int i;

	for_each_shared_dpll(display, pll, i)
		verify_single_dpll_state(display, pll, NULL, NULL);
}
4725