xref: /linux/drivers/gpu/drm/i915/display/intel_dpll_mgr.c (revision c31f4aa8fed048fa70e742c4bb49bb48dc489ab3)
1 /*
2  * Copyright © 2006-2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <linux/math.h>
25 #include <linux/string_helpers.h>
26 
27 #include <drm/drm_print.h>
28 
29 #include "bxt_dpio_phy_regs.h"
30 #include "intel_cx0_phy.h"
31 #include "intel_de.h"
32 #include "intel_display_regs.h"
33 #include "intel_display_types.h"
34 #include "intel_display_utils.h"
35 #include "intel_dkl_phy.h"
36 #include "intel_dkl_phy_regs.h"
37 #include "intel_dpio_phy.h"
38 #include "intel_dpll.h"
39 #include "intel_dpll_mgr.h"
40 #include "intel_hti.h"
41 #include "intel_mg_phy_regs.h"
42 #include "intel_pch_refclk.h"
43 #include "intel_step.h"
44 #include "intel_tc.h"
45 
46 /**
47  * DOC: Display PLLs
48  *
49  * Display PLLs used for driving outputs vary by platform. While some have
50  * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
51  * from a pool. In the latter scenario, it is possible that multiple pipes
52  * share a PLL if their configurations match.
53  *
54  * This file provides an abstraction over display PLLs. The function
55  * intel_dpll_init() initializes the PLLs for the given platform.  The
56  * users of a PLL are tracked and that tracking is integrated with the atomic
57  * modset interface. During an atomic operation, required PLLs can be reserved
58  * for a given CRTC and encoder configuration by calling
59  * intel_dpll_reserve() and previously reserved PLLs can be released
60  * with intel_dpll_release().
61  * Changes to the users are first staged in the atomic state, and then made
62  * effective by calling intel_dpll_swap_state() during the atomic
63  * commit phase.
64  */
65 
66 /* platform specific hooks for managing DPLLs */
67 struct intel_dpll_funcs {
68 	/*
69 	 * Hook for enabling the pll, called from intel_enable_dpll() if
70 	 * the pll is not already enabled.
71 	 */
72 	void (*enable)(struct intel_display *display,
73 		       struct intel_dpll *pll,
74 		       const struct intel_dpll_hw_state *dpll_hw_state);
75 
76 	/*
77 	 * Hook for disabling the pll, called from intel_disable_dpll()
78 	 * only when it is safe to disable the pll, i.e., there are no more
79 	 * tracked users for it.
80 	 */
81 	void (*disable)(struct intel_display *display,
82 			struct intel_dpll *pll);
83 
84 	/*
85 	 * Hook for reading the values currently programmed to the DPLL
86 	 * registers. This is used for initial hw state readout and state
87 	 * verification after a mode set.
88 	 */
89 	bool (*get_hw_state)(struct intel_display *display,
90 			     struct intel_dpll *pll,
91 			     struct intel_dpll_hw_state *dpll_hw_state);
92 
93 	/*
94 	 * Hook for calculating the pll's output frequency based on its passed
95 	 * in state.
96 	 */
97 	int (*get_freq)(struct intel_display *i915,
98 			const struct intel_dpll *pll,
99 			const struct intel_dpll_hw_state *dpll_hw_state);
100 };
101 
/*
 * Per-platform DPLL manager: the PLL table plus the hooks used to
 * compute, reserve, release and verify DPLLs for CRTCs.
 */
struct intel_dpll_mgr {
	/* table of PLLs available on the platform */
	const struct dpll_info *dpll_info;

	/* compute the DPLL state for @crtc as part of the atomic check phase */
	int (*compute_dplls)(struct intel_atomic_state *state,
			     struct intel_crtc *crtc,
			     struct intel_encoder *encoder);
	/* reserve the DPLL(s) needed by @crtc in the atomic state */
	int (*get_dplls)(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder);
	/* release the DPLL(s) previously reserved for @crtc */
	void (*put_dplls)(struct intel_atomic_state *state,
			  struct intel_crtc *crtc);
	/* switch the active DPLL for @crtc (Type-C port flows) */
	void (*update_active_dpll)(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_encoder *encoder);
	/* refresh the cached reference clock frequencies */
	void (*update_ref_clks)(struct intel_display *display);
	/* dump a decoded hw state for debugging */
	void (*dump_hw_state)(struct drm_printer *p,
			      const struct intel_dpll_hw_state *dpll_hw_state);
	/* compare two hw states for state-checker verification */
	bool (*compare_hw_state)(const struct intel_dpll_hw_state *a,
				 const struct intel_dpll_hw_state *b);
};
122 
123 static void
124 intel_atomic_duplicate_dpll_state(struct intel_display *display,
125 				  struct intel_dpll_state *dpll_state)
126 {
127 	struct intel_dpll *pll;
128 	int i;
129 
130 	/* Copy dpll state */
131 	for_each_dpll(display, pll, i)
132 		dpll_state[pll->index] = pll->state;
133 }
134 
135 static struct intel_dpll_state *
136 intel_atomic_get_dpll_state(struct drm_atomic_state *s)
137 {
138 	struct intel_atomic_state *state = to_intel_atomic_state(s);
139 	struct intel_display *display = to_intel_display(state);
140 
141 	drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
142 
143 	if (!state->dpll_set) {
144 		state->dpll_set = true;
145 
146 		intel_atomic_duplicate_dpll_state(display,
147 						  state->dpll_state);
148 	}
149 
150 	return state->dpll_state;
151 }
152 
153 /**
154  * intel_get_dpll_by_id - get a DPLL given its id
155  * @display: intel_display device instance
156  * @id: pll id
157  *
158  * Returns:
159  * A pointer to the DPLL with @id
160  */
161 struct intel_dpll *
162 intel_get_dpll_by_id(struct intel_display *display,
163 		     enum intel_dpll_id id)
164 {
165 	struct intel_dpll *pll;
166 	int i;
167 
168 	for_each_dpll(display, pll, i) {
169 		if (pll->info->id == id)
170 			return pll;
171 	}
172 
173 	MISSING_CASE(id);
174 	return NULL;
175 }
176 
177 /* For ILK+ */
178 void assert_dpll(struct intel_display *display,
179 		 struct intel_dpll *pll,
180 		 bool state)
181 {
182 	bool cur_state;
183 	struct intel_dpll_hw_state hw_state;
184 
185 	if (drm_WARN(display->drm, !pll,
186 		     "asserting DPLL %s with no DPLL\n", str_on_off(state)))
187 		return;
188 
189 	cur_state = intel_dpll_get_hw_state(display, pll, &hw_state);
190 	INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
191 				 "%s assertion failure (expected %s, current %s)\n",
192 				 pll->info->name, str_on_off(state),
193 				 str_on_off(cur_state));
194 }
195 
196 static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
197 {
198 	return TC_PORT_1 + id - DPLL_ID_ICL_MGPLL1;
199 }
200 
201 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
202 {
203 	return tc_port - TC_PORT_1 + DPLL_ID_ICL_MGPLL1;
204 }
205 
/*
 * Return the enable register for a combo PHY PLL: DG1 has dedicated
 * registers, JSL/EHL enable DPLL4 through the MG PLL 1 enable register,
 * everything else uses the ICL register layout.
 */
static i915_reg_t
intel_combo_pll_enable_reg(struct intel_display *display,
			   struct intel_dpll *pll)
{
	if (display->platform.dg1)
		return DG1_DPLL_ENABLE(pll->info->id);
	else if ((display->platform.jasperlake || display->platform.elkhartlake) &&
		 (pll->info->id == DPLL_ID_EHL_DPLL4))
		return MG_PLL_ENABLE(0);

	return ICL_DPLL_ENABLE(pll->info->id);
}
218 
/*
 * Return the enable register for a Type-C port PLL: ADL-P uses per-port
 * enable registers, earlier platforms the MG PLL enable registers.
 */
static i915_reg_t
intel_tc_pll_enable_reg(struct intel_display *display,
			struct intel_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);

	if (display->platform.alderlake_p)
		return ADLP_PORTTC_PLL_ENABLE(tc_port);

	return MG_PLL_ENABLE(tc_port);
}
231 
/*
 * Enable @pll unconditionally, grabbing its power domain reference first
 * (if it has one) so the PLL registers are powered when programmed.
 * Caller holds display->dpll.lock.
 */
static void _intel_enable_shared_dpll(struct intel_display *display,
				      struct intel_dpll *pll)
{
	if (pll->info->power_domain)
		pll->wakeref = intel_display_power_get(display, pll->info->power_domain);

	pll->info->funcs->enable(display, pll, &pll->state.hw_state);
	pll->on = true;
}
241 
/*
 * Disable @pll unconditionally, dropping the power domain reference taken
 * at enable time only after the PLL itself has been shut down.
 * Caller holds display->dpll.lock.
 */
static void _intel_disable_shared_dpll(struct intel_display *display,
				       struct intel_dpll *pll)
{
	pll->info->funcs->disable(display, pll);
	pll->on = false;

	if (pll->info->power_domain)
		intel_display_power_put(display, pll->info->power_domain, pll->wakeref);
}
251 
/**
 * intel_dpll_enable - enable a CRTC's DPLL
 * @crtc_state: CRTC, and its state, which has a DPLL
 *
 * Enable DPLL used by @crtc.
 */
void intel_dpll_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_dpll *pll = crtc_state->intel_dpll;
	unsigned int pipe_mask = intel_crtc_joined_pipe_mask(crtc_state);
	unsigned int old_mask;

	if (drm_WARN_ON(display->drm, !pll))
		return;

	mutex_lock(&display->dpll.lock);
	old_mask = pll->active_mask;

	/* The pipes must hold a reference and must not already be active. */
	if (drm_WARN_ON(display->drm, !(pll->state.pipe_mask & pipe_mask)) ||
	    drm_WARN_ON(display->drm, pll->active_mask & pipe_mask))
		goto out;

	pll->active_mask |= pipe_mask;

	drm_dbg_kms(display->drm,
		    "enable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
		    pll->info->name, pll->active_mask, pll->on,
		    crtc->base.base.id, crtc->base.name);

	/* Already enabled on behalf of another pipe: just sanity check it. */
	if (old_mask) {
		drm_WARN_ON(display->drm, !pll->on);
		assert_dpll_enabled(display, pll);
		goto out;
	}
	drm_WARN_ON(display->drm, pll->on);

	drm_dbg_kms(display->drm, "enabling %s\n", pll->info->name);

	/* First user: actually turn the PLL on. */
	_intel_enable_shared_dpll(display, pll);

out:
	mutex_unlock(&display->dpll.lock);
}
297 
/**
 * intel_dpll_disable - disable a CRTC's shared DPLL
 * @crtc_state: CRTC, and its state, which has a shared DPLL
 *
 * Disable DPLL used by @crtc.
 */
void intel_dpll_disable(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_dpll *pll = crtc_state->intel_dpll;
	unsigned int pipe_mask = intel_crtc_joined_pipe_mask(crtc_state);

	/* PCH only available on ILK+ */
	if (DISPLAY_VER(display) < 5)
		return;

	if (pll == NULL)
		return;

	mutex_lock(&display->dpll.lock);
	/* Disabling a PLL this CRTC never activated indicates a bug. */
	if (drm_WARN(display->drm, !(pll->active_mask & pipe_mask),
		     "%s not used by [CRTC:%d:%s]\n", pll->info->name,
		     crtc->base.base.id, crtc->base.name))
		goto out;

	drm_dbg_kms(display->drm,
		    "disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
		    pll->info->name, pll->active_mask, pll->on,
		    crtc->base.base.id, crtc->base.name);

	assert_dpll_enabled(display, pll);
	drm_WARN_ON(display->drm, !pll->on);

	/* Keep the PLL running while other pipes still use it. */
	pll->active_mask &= ~pipe_mask;
	if (pll->active_mask)
		goto out;

	drm_dbg_kms(display->drm, "disabling %s\n", pll->info->name);

	_intel_disable_shared_dpll(display, pll);

out:
	mutex_unlock(&display->dpll.lock);
}
343 
344 static unsigned long
345 intel_dpll_mask_all(struct intel_display *display)
346 {
347 	struct intel_dpll *pll;
348 	unsigned long dpll_mask = 0;
349 	int i;
350 
351 	for_each_dpll(display, pll, i) {
352 		drm_WARN_ON(display->drm, dpll_mask & BIT(pll->info->id));
353 
354 		dpll_mask |= BIT(pll->info->id);
355 	}
356 
357 	return dpll_mask;
358 }
359 
/*
 * Find a DPLL from @dpll_mask for @crtc: prefer one whose staged hw state
 * already matches @dpll_hw_state exactly (so it can be shared), otherwise
 * fall back to the first currently unused one. Returns NULL when neither
 * is available.
 */
static struct intel_dpll *
intel_find_dpll(struct intel_atomic_state *state,
		const struct intel_crtc *crtc,
		const struct intel_dpll_hw_state *dpll_hw_state,
		unsigned long dpll_mask)
{
	struct intel_display *display = to_intel_display(crtc);
	unsigned long dpll_mask_all = intel_dpll_mask_all(display);
	struct intel_dpll_state *dpll_state;
	struct intel_dpll *unused_pll = NULL;
	enum intel_dpll_id id;

	dpll_state = intel_atomic_get_dpll_state(&state->base);

	/* Asking for a PLL the platform doesn't have indicates a bug. */
	drm_WARN_ON(display->drm, dpll_mask & ~dpll_mask_all);

	for_each_set_bit(id, &dpll_mask, fls(dpll_mask_all)) {
		struct intel_dpll *pll;

		pll = intel_get_dpll_by_id(display, id);
		if (!pll)
			continue;

		/* Only want to check enabled timings first */
		if (dpll_state[pll->index].pipe_mask == 0) {
			/* Remember the first free PLL as a fallback. */
			if (!unused_pll)
				unused_pll = pll;
			continue;
		}

		if (memcmp(dpll_hw_state,
			   &dpll_state[pll->index].hw_state,
			   sizeof(*dpll_hw_state)) == 0) {
			drm_dbg_kms(display->drm,
				    "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
				    crtc->base.base.id, crtc->base.name,
				    pll->info->name,
				    dpll_state[pll->index].pipe_mask,
				    pll->active_mask);
			return pll;
		}
	}

	/* Ok no matching timings, maybe there's a free one? */
	if (unused_pll) {
		drm_dbg_kms(display->drm, "[CRTC:%d:%s] allocated %s\n",
			    crtc->base.base.id, crtc->base.name,
			    unused_pll->info->name);
		return unused_pll;
	}

	return NULL;
}
413 
414 /**
415  * intel_dpll_crtc_get - Get a DPLL reference for a CRTC
416  * @crtc: CRTC on which behalf the reference is taken
417  * @pll: DPLL for which the reference is taken
418  * @dpll_state: the DPLL atomic state in which the reference is tracked
419  *
420  * Take a reference for @pll tracking the use of it by @crtc.
421  */
422 static void
423 intel_dpll_crtc_get(const struct intel_crtc *crtc,
424 		    const struct intel_dpll *pll,
425 		    struct intel_dpll_state *dpll_state)
426 {
427 	struct intel_display *display = to_intel_display(crtc);
428 
429 	drm_WARN_ON(display->drm, (dpll_state->pipe_mask & BIT(crtc->pipe)) != 0);
430 
431 	dpll_state->pipe_mask |= BIT(crtc->pipe);
432 
433 	drm_dbg_kms(display->drm, "[CRTC:%d:%s] reserving %s\n",
434 		    crtc->base.base.id, crtc->base.name, pll->info->name);
435 }
436 
437 static void
438 intel_reference_dpll(struct intel_atomic_state *state,
439 		     const struct intel_crtc *crtc,
440 		     const struct intel_dpll *pll,
441 		     const struct intel_dpll_hw_state *dpll_hw_state)
442 {
443 	struct intel_dpll_state *dpll_state;
444 
445 	dpll_state = intel_atomic_get_dpll_state(&state->base);
446 
447 	if (dpll_state[pll->index].pipe_mask == 0)
448 		dpll_state[pll->index].hw_state = *dpll_hw_state;
449 
450 	intel_dpll_crtc_get(crtc, pll, &dpll_state[pll->index]);
451 }
452 
453 /**
454  * intel_dpll_crtc_put - Drop a DPLL reference for a CRTC
455  * @crtc: CRTC on which behalf the reference is dropped
456  * @pll: DPLL for which the reference is dropped
457  * @dpll_state: the DPLL atomic state in which the reference is tracked
458  *
459  * Drop a reference for @pll tracking the end of use of it by @crtc.
460  */
461 void
462 intel_dpll_crtc_put(const struct intel_crtc *crtc,
463 		    const struct intel_dpll *pll,
464 		    struct intel_dpll_state *dpll_state)
465 {
466 	struct intel_display *display = to_intel_display(crtc);
467 
468 	drm_WARN_ON(display->drm, (dpll_state->pipe_mask & BIT(crtc->pipe)) == 0);
469 
470 	dpll_state->pipe_mask &= ~BIT(crtc->pipe);
471 
472 	drm_dbg_kms(display->drm, "[CRTC:%d:%s] releasing %s\n",
473 		    crtc->base.base.id, crtc->base.name, pll->info->name);
474 }
475 
476 static void intel_unreference_dpll(struct intel_atomic_state *state,
477 				   const struct intel_crtc *crtc,
478 				   const struct intel_dpll *pll)
479 {
480 	struct intel_dpll_state *dpll_state;
481 
482 	dpll_state = intel_atomic_get_dpll_state(&state->base);
483 
484 	intel_dpll_crtc_put(crtc, pll, &dpll_state[pll->index]);
485 }
486 
487 static void intel_put_dpll(struct intel_atomic_state *state,
488 			   struct intel_crtc *crtc)
489 {
490 	const struct intel_crtc_state *old_crtc_state =
491 		intel_atomic_get_old_crtc_state(state, crtc);
492 	struct intel_crtc_state *new_crtc_state =
493 		intel_atomic_get_new_crtc_state(state, crtc);
494 
495 	new_crtc_state->intel_dpll = NULL;
496 
497 	if (!old_crtc_state->intel_dpll)
498 		return;
499 
500 	intel_unreference_dpll(state, crtc, old_crtc_state->intel_dpll);
501 }
502 
/**
 * intel_dpll_swap_state - make atomic DPLL configuration effective
 * @state: atomic state
 *
 * This is the dpll version of drm_atomic_helper_swap_state() since the
 * helper does not handle driver-specific global state.
 *
 * For consistency with atomic helpers this function does a complete swap,
 * i.e. it also puts the current state into @state, even though there is no
 * need for that at this moment.
 */
void intel_dpll_swap_state(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_dpll_state *dpll_state = state->dpll_state;
	struct intel_dpll *pll;
	int i;

	/* Nothing to swap if no DPLL state was staged in this commit. */
	if (!state->dpll_set)
		return;

	for_each_dpll(display, pll, i)
		swap(pll->state, dpll_state[pll->index]);
}
527 
/*
 * Read the PCH DPLL registers into @dpll_hw_state. Returns true if the
 * PLL is enabled, false if it is off or the display power well could not
 * be acquired.
 */
static bool ibx_pch_dpll_get_hw_state(struct intel_display *display,
				      struct intel_dpll *pll,
				      struct intel_dpll_hw_state *dpll_hw_state)
{
	struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;

	/* Don't read the registers with display power down. */
	wakeref = intel_display_power_get_if_enabled(display,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(display, PCH_DPLL(id));
	hw_state->dpll = val;
	hw_state->fp0 = intel_de_read(display, PCH_FP0(id));
	hw_state->fp1 = intel_de_read(display, PCH_FP1(id));

	intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & DPLL_VCO_ENABLE;
}
551 
/* Warn if no PCH reference clock source is enabled in PCH_DREF_CONTROL. */
static void ibx_assert_pch_refclk_enabled(struct intel_display *display)
{
	u32 val;
	bool enabled;

	val = intel_de_read(display, PCH_DREF_CONTROL);
	/* Any of the three source selects being non-zero counts as enabled. */
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
			    DREF_SUPERSPREAD_SOURCE_MASK));
	INTEL_DISPLAY_STATE_WARN(display, !enabled,
				 "PCH refclk assertion failure, should be active but is disabled\n");
}
563 
/* Program and enable a PCH DPLL with the values in @dpll_hw_state. */
static void ibx_pch_dpll_enable(struct intel_display *display,
				struct intel_dpll *pll,
				const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
	const enum intel_dpll_id id = pll->info->id;

	/* PCH refclock must be enabled first */
	ibx_assert_pch_refclk_enabled(display);

	intel_de_write(display, PCH_FP0(id), hw_state->fp0);
	intel_de_write(display, PCH_FP1(id), hw_state->fp1);

	intel_de_write(display, PCH_DPLL(id), hw_state->dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(display, PCH_DPLL(id));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(display, PCH_DPLL(id), hw_state->dpll);
	intel_de_posting_read(display, PCH_DPLL(id));
	udelay(200);
}
592 
/* Disable a PCH DPLL by clearing its control register entirely. */
static void ibx_pch_dpll_disable(struct intel_display *display,
				 struct intel_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(display, PCH_DPLL(id), 0);
	intel_de_posting_read(display, PCH_DPLL(id));
	/* Give the PLL time to wind down. */
	udelay(200);
}
602 
/*
 * No-op compute hook for PCH DPLLs: the hw state is computed elsewhere as
 * part of the crtc state, so there is nothing to do at this stage.
 */
static int ibx_compute_dpll(struct intel_atomic_state *state,
			    struct intel_crtc *crtc,
			    struct intel_encoder *encoder)
{
	return 0;
}
609 
610 static int ibx_get_dpll(struct intel_atomic_state *state,
611 			struct intel_crtc *crtc,
612 			struct intel_encoder *encoder)
613 {
614 	struct intel_display *display = to_intel_display(state);
615 	struct intel_crtc_state *crtc_state =
616 		intel_atomic_get_new_crtc_state(state, crtc);
617 	struct intel_dpll *pll;
618 	enum intel_dpll_id id;
619 
620 	if (HAS_PCH_IBX(display)) {
621 		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
622 		id = (enum intel_dpll_id) crtc->pipe;
623 		pll = intel_get_dpll_by_id(display, id);
624 
625 		drm_dbg_kms(display->drm,
626 			    "[CRTC:%d:%s] using pre-allocated %s\n",
627 			    crtc->base.base.id, crtc->base.name,
628 			    pll->info->name);
629 	} else {
630 		pll = intel_find_dpll(state, crtc,
631 				      &crtc_state->dpll_hw_state,
632 				      BIT(DPLL_ID_PCH_PLL_B) |
633 				      BIT(DPLL_ID_PCH_PLL_A));
634 	}
635 
636 	if (!pll)
637 		return -EINVAL;
638 
639 	/* reference the pll */
640 	intel_reference_dpll(state, crtc,
641 			     pll, &crtc_state->dpll_hw_state);
642 
643 	crtc_state->intel_dpll = pll;
644 
645 	return 0;
646 }
647 
648 static void ibx_dump_hw_state(struct drm_printer *p,
649 			      const struct intel_dpll_hw_state *dpll_hw_state)
650 {
651 	const struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
652 
653 	drm_printf(p, "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
654 		   "fp0: 0x%x, fp1: 0x%x\n",
655 		   hw_state->dpll,
656 		   hw_state->dpll_md,
657 		   hw_state->fp0,
658 		   hw_state->fp1);
659 }
660 
661 static bool ibx_compare_hw_state(const struct intel_dpll_hw_state *_a,
662 				 const struct intel_dpll_hw_state *_b)
663 {
664 	const struct i9xx_dpll_hw_state *a = &_a->i9xx;
665 	const struct i9xx_dpll_hw_state *b = &_b->i9xx;
666 
667 	return a->dpll == b->dpll &&
668 		a->dpll_md == b->dpll_md &&
669 		a->fp0 == b->fp0 &&
670 		a->fp1 == b->fp1;
671 }
672 
/* PCH DPLL hooks; no .get_freq since the frequency comes from the FP/DPLL regs elsewhere. */
static const struct intel_dpll_funcs ibx_pch_dpll_funcs = {
	.enable = ibx_pch_dpll_enable,
	.disable = ibx_pch_dpll_disable,
	.get_hw_state = ibx_pch_dpll_get_hw_state,
};
678 
/* PLL table for ILK-era PCH platforms; zeroed entry terminates the list. */
static const struct dpll_info pch_plls[] = {
	{ .name = "PCH DPLL A", .funcs = &ibx_pch_dpll_funcs, .id = DPLL_ID_PCH_PLL_A, },
	{ .name = "PCH DPLL B", .funcs = &ibx_pch_dpll_funcs, .id = DPLL_ID_PCH_PLL_B, },
	{}
};
684 
/* DPLL manager for ILK-era PCH platforms. */
static const struct intel_dpll_mgr pch_pll_mgr = {
	.dpll_info = pch_plls,
	.compute_dplls = ibx_compute_dpll,
	.get_dplls = ibx_get_dpll,
	.put_dplls = intel_put_dpll,
	.dump_hw_state = ibx_dump_hw_state,
	.compare_hw_state = ibx_compare_hw_state,
};
693 
/* Program and enable a HSW/BDW WRPLL from @dpll_hw_state. */
static void hsw_ddi_wrpll_enable(struct intel_display *display,
				 struct intel_dpll *pll,
				 const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(display, WRPLL_CTL(id), hw_state->wrpll);
	intel_de_posting_read(display, WRPLL_CTL(id));
	/* Let the PLL lock before it gets used. */
	udelay(20);
}
705 
/* Program and enable the HSW/BDW SPLL from @dpll_hw_state. */
static void hsw_ddi_spll_enable(struct intel_display *display,
				struct intel_dpll *pll,
				const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;

	intel_de_write(display, SPLL_CTL, hw_state->spll);
	intel_de_posting_read(display, SPLL_CTL);
	/* Let the PLL lock before it gets used. */
	udelay(20);
}
716 
/* Disable a HSW/BDW WRPLL and reconfigure the PCH refclk if it used SSC. */
static void hsw_ddi_wrpll_disable(struct intel_display *display,
				  struct intel_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_rmw(display, WRPLL_CTL(id), WRPLL_PLL_ENABLE, 0);
	intel_de_posting_read(display, WRPLL_CTL(id));

	/*
	 * Try to set up the PCH reference clock once all DPLLs
	 * that depend on it have been shut down.
	 */
	if (display->dpll.pch_ssc_use & BIT(id))
		intel_init_pch_refclk(display);
}
732 
733 static void hsw_ddi_spll_disable(struct intel_display *display,
734 				 struct intel_dpll *pll)
735 {
736 	enum intel_dpll_id id = pll->info->id;
737 
738 	intel_de_rmw(display, SPLL_CTL, SPLL_PLL_ENABLE, 0);
739 	intel_de_posting_read(display, SPLL_CTL);
740 
741 	/*
742 	 * Try to set up the PCH reference clock once all DPLLs
743 	 * that depend on it have been shut down.
744 	 */
745 	if (display->dpll.pch_ssc_use & BIT(id))
746 		intel_init_pch_refclk(display);
747 }
748 
/*
 * Read a WRPLL's control register into @dpll_hw_state. Returns true if
 * the PLL is enabled, false if it is off or display power is down.
 */
static bool hsw_ddi_wrpll_get_hw_state(struct intel_display *display,
				       struct intel_dpll *pll,
				       struct intel_dpll_hw_state *dpll_hw_state)
{
	struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;

	/* Don't read the registers with display power down. */
	wakeref = intel_display_power_get_if_enabled(display,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(display, WRPLL_CTL(id));
	hw_state->wrpll = val;

	intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & WRPLL_PLL_ENABLE;
}
770 
/*
 * Read the SPLL control register into @dpll_hw_state. Returns true if
 * the PLL is enabled, false if it is off or display power is down.
 */
static bool hsw_ddi_spll_get_hw_state(struct intel_display *display,
				      struct intel_dpll *pll,
				      struct intel_dpll_hw_state *dpll_hw_state)
{
	struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
	intel_wakeref_t wakeref;
	u32 val;

	/* Don't read the registers with display power down. */
	wakeref = intel_display_power_get_if_enabled(display,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(display, SPLL_CTL);
	hw_state->spll = val;

	intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & SPLL_PLL_ENABLE;
}
791 
/* LC PLL reference: 2700 MHz, also expressed in 2 kHz units */
#define LC_FREQ 2700
#define LC_FREQ_2K U64_C(LC_FREQ * 2000)

/* Post divider (P) search range; even values only */
#define P_MIN 2
#define P_MAX 64
#define P_INC 2

/* Constraints for PLL good behavior */
/* reference and VCO frequency limits — presumably in MHz, matching LC_FREQ */
#define REF_MIN 48
#define REF_MAX 400
#define VCO_MIN 2400
#define VCO_MAX 4800

/* WRPLL divider candidate: post (p), feedback (n2 = 2*N), reference (r2 = 2*R) */
struct hsw_wrpll_rnp {
	unsigned p, n2, r2;
};
808 
/*
 * Return the PPM error budget for the WRPLL divider search at the given
 * pixel clock (Hz). Clocks not listed in the table get the default
 * 1000 ppm budget.
 */
static unsigned hsw_wrpll_get_budget_for_freq(int clock)
{
	static const struct {
		int clock;
		unsigned budget;
	} budgets[] = {
		{ 25175000, 0 }, { 25200000, 0 }, { 27000000, 0 },
		{ 27027000, 0 }, { 37762500, 0 }, { 37800000, 0 },
		{ 40500000, 0 }, { 40541000, 0 }, { 54000000, 0 },
		{ 54054000, 0 }, { 59341000, 0 }, { 59400000, 0 },
		{ 72000000, 0 }, { 74176000, 0 }, { 74250000, 0 },
		{ 81000000, 0 }, { 81081000, 0 }, { 89012000, 0 },
		{ 89100000, 0 }, { 108000000, 0 }, { 108108000, 0 },
		{ 111264000, 0 }, { 111375000, 0 }, { 148352000, 0 },
		{ 148500000, 0 }, { 162000000, 0 }, { 162162000, 0 },
		{ 222525000, 0 }, { 222750000, 0 }, { 296703000, 0 },
		{ 297000000, 0 },
		{ 233500000, 1500 }, { 245250000, 1500 },
		{ 247750000, 1500 }, { 253250000, 1500 },
		{ 298000000, 1500 },
		{ 169128000, 2000 }, { 169500000, 2000 },
		{ 179500000, 2000 }, { 202000000, 2000 },
		{ 256250000, 4000 }, { 262500000, 4000 },
		{ 270000000, 4000 }, { 272500000, 4000 },
		{ 273750000, 4000 }, { 280750000, 4000 },
		{ 281250000, 4000 }, { 286000000, 4000 },
		{ 291750000, 4000 },
		{ 267250000, 5000 }, { 268500000, 5000 },
	};
	unsigned int i;

	for (i = 0; i < sizeof(budgets) / sizeof(budgets[0]); i++) {
		if (budgets[i].clock == clock)
			return budgets[i].budget;
	}

	return 1000;
}
872 
/*
 * Consider divider candidate (r2, n2, p) for target frequency @freq2k
 * (in 2 kHz units) and record it in @best if it beats the current best:
 * within the @budget ppm error, prefer the candidate maximizing
 * n2/(r2*r2) (i.e. Ref * VCO); outside the budget, the closest wins.
 */
static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
				 unsigned int r2, unsigned int n2,
				 unsigned int p,
				 struct hsw_wrpll_rnp *best)
{
	u64 a, b, c, d, diff, diff_best;

	/* No best (r,n,p) yet */
	if (best->p == 0) {
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
		return;
	}

	/*
	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
	 * freq2k.
	 *
	 * delta = 1e6 *
	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
	 *	   freq2k;
	 *
	 * and we would like delta <= budget.
	 *
	 * If the discrepancy is above the PPM-based budget, always prefer to
	 * improve upon the previous solution.  However, if you're within the
	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
	 */
	a = freq2k * budget * p * r2;
	b = freq2k * budget * best->p * best->r2;
	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
	diff_best = abs_diff(freq2k * best->p * best->r2,
			     LC_FREQ_2K * best->n2);
	c = 1000000 * diff;
	d = 1000000 * diff_best;

	if (a < c && b < d) {
		/* If both are above the budget, pick the closer */
		if (best->p * best->r2 * diff < p * r2 * diff_best) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	} else if (a >= c && b < d) {
		/* If A is below the threshold but B is above it?  Update. */
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
	} else if (a >= c && b >= d) {
		/* Both are below the limit, so pick the higher n2/(r2*r2) */
		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	}
	/* Otherwise a < c && b >= d, do nothing */
}
932 
/*
 * Compute WRPLL dividers (r2 = 2*R, n2 = 2*N, p) for @clock, trying every
 * combination that keeps the reference and VCO frequencies within spec and
 * keeping the best candidate per hsw_wrpll_update_rnp().
 */
static void
hsw_ddi_calculate_wrpll(int clock /* in Hz */,
			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
{
	u64 freq2k;
	unsigned p, n2, r2;
	struct hsw_wrpll_rnp best = {};
	unsigned budget;

	/* Target frequency in 2 kHz units, matching LC_FREQ_2K. */
	freq2k = clock / 100;

	budget = hsw_wrpll_get_budget_for_freq(clock);

	/* Special case handling for 540 pixel clock: bypass WR PLL entirely
	 * and directly pass the LC PLL to it. */
	if (freq2k == 5400000) {
		*n2_out = 2;
		*p_out = 1;
		*r2_out = 2;
		return;
	}

	/*
	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
	 * the WR PLL.
	 *
	 * We want R so that REF_MIN <= Ref <= REF_MAX.
	 * Injecting R2 = 2 * R gives:
	 *   REF_MAX * r2 > LC_FREQ * 2 and
	 *   REF_MIN * r2 < LC_FREQ * 2
	 *
	 * Which means the desired boundaries for r2 are:
	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
	 *
	 */
	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
	     r2 <= LC_FREQ * 2 / REF_MIN;
	     r2++) {

		/*
		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
		 *
		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
		 *   VCO_MAX * r2 > n2 * LC_FREQ and
		 *   VCO_MIN * r2 < n2 * LC_FREQ)
		 *
		 * Which means the desired boundaries for n2 are:
		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
		 */
		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
		     n2 <= VCO_MAX * r2 / LC_FREQ;
		     n2++) {

			for (p = P_MIN; p <= P_MAX; p += P_INC)
				hsw_wrpll_update_rnp(freq2k, budget,
						     r2, n2, p, &best);
		}
	}

	*n2_out = best.n2;
	*p_out = best.p;
	*r2_out = best.r2;
}
997 
/*
 * Decode a WRPLL control value back into the output frequency (in kHz),
 * based on the selected reference clock and the programmed dividers.
 * Returns 0 for an unknown reference selection.
 */
static int hsw_ddi_wrpll_get_freq(struct intel_display *display,
				  const struct intel_dpll *pll,
				  const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
	int refclk;
	int n, p, r;
	u32 wrpll = hw_state->wrpll;

	switch (wrpll & WRPLL_REF_MASK) {
	case WRPLL_REF_SPECIAL_HSW:
		/* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
		if (display->platform.haswell && !display->platform.haswell_ult) {
			refclk = display->dpll.ref_clks.nssc;
			break;
		}
		fallthrough;
	case WRPLL_REF_PCH_SSC:
		/*
		 * We could calculate spread here, but our checking
		 * code only cares about 5% accuracy, and spread is a max of
		 * 0.5% downspread.
		 */
		refclk = display->dpll.ref_clks.ssc;
		break;
	case WRPLL_REF_LCPLL:
		refclk = 2700000;
		break;
	default:
		MISSING_CASE(wrpll);
		return 0;
	}

	r = wrpll & WRPLL_DIVIDER_REF_MASK;
	p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
	n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;

	/* Convert to KHz, p & r have a fixed point portion */
	return (refclk * n / 10) / (p * r) * 2;
}
1038 
/*
 * Compute the WRPLL state for an HDMI output (see hsw_compute_dpll())
 * and update port_clock to the frequency the hardware will actually
 * produce with the chosen dividers.
 */
static int
hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state,
			   struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct hsw_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.hsw;
	unsigned int p, n2, r2;

	/* port_clock is in kHz, the divider search works in Hz */
	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);

	hw_state->wrpll =
		WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
		WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
		WRPLL_DIVIDER_POST(p);

	/* read the effective frequency back from the register value */
	crtc_state->port_clock = hsw_ddi_wrpll_get_freq(display, NULL,
							&crtc_state->dpll_hw_state);

	return 0;
}
1061 
1062 static struct intel_dpll *
1063 hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
1064 		       struct intel_crtc *crtc)
1065 {
1066 	struct intel_crtc_state *crtc_state =
1067 		intel_atomic_get_new_crtc_state(state, crtc);
1068 
1069 	return intel_find_dpll(state, crtc,
1070 				      &crtc_state->dpll_hw_state,
1071 				      BIT(DPLL_ID_WRPLL2) |
1072 				      BIT(DPLL_ID_WRPLL1));
1073 }
1074 
1075 static int
1076 hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state)
1077 {
1078 	struct intel_display *display = to_intel_display(crtc_state);
1079 	int clock = crtc_state->port_clock;
1080 
1081 	switch (clock / 2) {
1082 	case 81000:
1083 	case 135000:
1084 	case 270000:
1085 		return 0;
1086 	default:
1087 		drm_dbg_kms(display->drm, "Invalid clock for DP: %d\n",
1088 			    clock);
1089 		return -EINVAL;
1090 	}
1091 }
1092 
1093 static struct intel_dpll *
1094 hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
1095 {
1096 	struct intel_display *display = to_intel_display(crtc_state);
1097 	struct intel_dpll *pll;
1098 	enum intel_dpll_id pll_id;
1099 	int clock = crtc_state->port_clock;
1100 
1101 	switch (clock / 2) {
1102 	case 81000:
1103 		pll_id = DPLL_ID_LCPLL_810;
1104 		break;
1105 	case 135000:
1106 		pll_id = DPLL_ID_LCPLL_1350;
1107 		break;
1108 	case 270000:
1109 		pll_id = DPLL_ID_LCPLL_2700;
1110 		break;
1111 	default:
1112 		MISSING_CASE(clock / 2);
1113 		return NULL;
1114 	}
1115 
1116 	pll = intel_get_dpll_by_id(display, pll_id);
1117 
1118 	if (!pll)
1119 		return NULL;
1120 
1121 	return pll;
1122 }
1123 
1124 static int hsw_ddi_lcpll_get_freq(struct intel_display *display,
1125 				  const struct intel_dpll *pll,
1126 				  const struct intel_dpll_hw_state *dpll_hw_state)
1127 {
1128 	int link_clock = 0;
1129 
1130 	switch (pll->info->id) {
1131 	case DPLL_ID_LCPLL_810:
1132 		link_clock = 81000;
1133 		break;
1134 	case DPLL_ID_LCPLL_1350:
1135 		link_clock = 135000;
1136 		break;
1137 	case DPLL_ID_LCPLL_2700:
1138 		link_clock = 270000;
1139 		break;
1140 	default:
1141 		drm_WARN(display->drm, 1, "bad port clock sel\n");
1142 		break;
1143 	}
1144 
1145 	return link_clock * 2;
1146 }
1147 
/*
 * Compute the SPLL state for an analog (CRT) output. Only a 270 MHz
 * port clock (135 MHz link rate) is supported: the SPLL is programmed
 * to a fixed 1350 MHz with the muxed SSC reference.
 */
static int
hsw_ddi_spll_compute_dpll(struct intel_atomic_state *state,
			  struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct hsw_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.hsw;

	if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
		return -EINVAL;

	hw_state->spll =
		SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;

	return 0;
}
1164 
1165 static struct intel_dpll *
1166 hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
1167 		      struct intel_crtc *crtc)
1168 {
1169 	struct intel_crtc_state *crtc_state =
1170 		intel_atomic_get_new_crtc_state(state, crtc);
1171 
1172 	return intel_find_dpll(state, crtc, &crtc_state->dpll_hw_state,
1173 				      BIT(DPLL_ID_SPLL));
1174 }
1175 
1176 static int hsw_ddi_spll_get_freq(struct intel_display *display,
1177 				 const struct intel_dpll *pll,
1178 				 const struct intel_dpll_hw_state *dpll_hw_state)
1179 {
1180 	const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
1181 	int link_clock = 0;
1182 
1183 	switch (hw_state->spll & SPLL_FREQ_MASK) {
1184 	case SPLL_FREQ_810MHz:
1185 		link_clock = 81000;
1186 		break;
1187 	case SPLL_FREQ_1350MHz:
1188 		link_clock = 135000;
1189 		break;
1190 	case SPLL_FREQ_2700MHz:
1191 		link_clock = 270000;
1192 		break;
1193 	default:
1194 		drm_WARN(display->drm, 1, "bad spll freq\n");
1195 		break;
1196 	}
1197 
1198 	return link_clock * 2;
1199 }
1200 
1201 static int hsw_compute_dpll(struct intel_atomic_state *state,
1202 			    struct intel_crtc *crtc,
1203 			    struct intel_encoder *encoder)
1204 {
1205 	struct intel_crtc_state *crtc_state =
1206 		intel_atomic_get_new_crtc_state(state, crtc);
1207 
1208 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1209 		return hsw_ddi_wrpll_compute_dpll(state, crtc);
1210 	else if (intel_crtc_has_dp_encoder(crtc_state))
1211 		return hsw_ddi_lcpll_compute_dpll(crtc_state);
1212 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1213 		return hsw_ddi_spll_compute_dpll(state, crtc);
1214 	else
1215 		return -EINVAL;
1216 }
1217 
1218 static int hsw_get_dpll(struct intel_atomic_state *state,
1219 			struct intel_crtc *crtc,
1220 			struct intel_encoder *encoder)
1221 {
1222 	struct intel_crtc_state *crtc_state =
1223 		intel_atomic_get_new_crtc_state(state, crtc);
1224 	struct intel_dpll *pll = NULL;
1225 
1226 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1227 		pll = hsw_ddi_wrpll_get_dpll(state, crtc);
1228 	else if (intel_crtc_has_dp_encoder(crtc_state))
1229 		pll = hsw_ddi_lcpll_get_dpll(crtc_state);
1230 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1231 		pll = hsw_ddi_spll_get_dpll(state, crtc);
1232 
1233 	if (!pll)
1234 		return -EINVAL;
1235 
1236 	intel_reference_dpll(state, crtc,
1237 			     pll, &crtc_state->dpll_hw_state);
1238 
1239 	crtc_state->intel_dpll = pll;
1240 
1241 	return 0;
1242 }
1243 
1244 static void hsw_update_dpll_ref_clks(struct intel_display *display)
1245 {
1246 	display->dpll.ref_clks.ssc = 135000;
1247 	/* Non-SSC is only used on non-ULT HSW. */
1248 	if (intel_de_read(display, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
1249 		display->dpll.ref_clks.nssc = 24000;
1250 	else
1251 		display->dpll.ref_clks.nssc = 135000;
1252 }
1253 
1254 static void hsw_dump_hw_state(struct drm_printer *p,
1255 			      const struct intel_dpll_hw_state *dpll_hw_state)
1256 {
1257 	const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
1258 
1259 	drm_printf(p, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
1260 		   hw_state->wrpll, hw_state->spll);
1261 }
1262 
1263 static bool hsw_compare_hw_state(const struct intel_dpll_hw_state *_a,
1264 				 const struct intel_dpll_hw_state *_b)
1265 {
1266 	const struct hsw_dpll_hw_state *a = &_a->hsw;
1267 	const struct hsw_dpll_hw_state *b = &_b->hsw;
1268 
1269 	return a->wrpll == b->wrpll &&
1270 		a->spll == b->spll;
1271 }
1272 
/* Hardware vfuncs for the two HSW/BDW WRPLLs. */
static const struct intel_dpll_funcs hsw_ddi_wrpll_funcs = {
	.enable = hsw_ddi_wrpll_enable,
	.disable = hsw_ddi_wrpll_disable,
	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
	.get_freq = hsw_ddi_wrpll_get_freq,
};
1279 
/* Hardware vfuncs for the HSW/BDW SPLL (analog output). */
static const struct intel_dpll_funcs hsw_ddi_spll_funcs = {
	.enable = hsw_ddi_spll_enable,
	.disable = hsw_ddi_spll_disable,
	.get_hw_state = hsw_ddi_spll_get_hw_state,
	.get_freq = hsw_ddi_spll_get_freq,
};
1286 
/*
 * The LCPLLs are fixed-frequency and always on (see hsw_plls[]),
 * so there is nothing to program here; this stub only satisfies the
 * intel_dpll_funcs interface.
 */
static void hsw_ddi_lcpll_enable(struct intel_display *display,
				 struct intel_dpll *pll,
				 const struct intel_dpll_hw_state *hw_state)
{
}
1292 
/* LCPLLs are always on and can't be disabled; intentionally a no-op. */
static void hsw_ddi_lcpll_disable(struct intel_display *display,
				  struct intel_dpll *pll)
{
}
1297 
/*
 * LCPLLs carry no readable configuration state; always report them
 * as enabled.
 */
static bool hsw_ddi_lcpll_get_hw_state(struct intel_display *display,
				       struct intel_dpll *pll,
				       struct intel_dpll_hw_state *dpll_hw_state)
{
	return true;
}
1304 
/* Stub vfuncs for the always-on LCPLLs; only get_freq does real work. */
static const struct intel_dpll_funcs hsw_ddi_lcpll_funcs = {
	.enable = hsw_ddi_lcpll_enable,
	.disable = hsw_ddi_lcpll_disable,
	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
	.get_freq = hsw_ddi_lcpll_get_freq,
};
1311 
/*
 * All PLLs on HSW/BDW; the three LCPLLs are fixed-frequency and
 * marked always-on.
 */
static const struct dpll_info hsw_plls[] = {
	{ .name = "WRPLL 1", .funcs = &hsw_ddi_wrpll_funcs, .id = DPLL_ID_WRPLL1, },
	{ .name = "WRPLL 2", .funcs = &hsw_ddi_wrpll_funcs, .id = DPLL_ID_WRPLL2, },
	{ .name = "SPLL", .funcs = &hsw_ddi_spll_funcs, .id = DPLL_ID_SPLL, },
	{ .name = "LCPLL 810", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_810,
	  .always_on = true, },
	{ .name = "LCPLL 1350", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_1350,
	  .always_on = true, },
	{ .name = "LCPLL 2700", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_2700,
	  .always_on = true, },
	{}
};
1324 
/* Top-level PLL management hooks for HSW/BDW. */
static const struct intel_dpll_mgr hsw_pll_mgr = {
	.dpll_info = hsw_plls,
	.compute_dplls = hsw_compute_dpll,
	.get_dplls = hsw_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = hsw_update_dpll_ref_clks,
	.dump_hw_state = hsw_dump_hw_state,
	.compare_hw_state = hsw_compare_hw_state,
};
1334 
/* Per-PLL control/config register offsets; see skl_dpll_regs[]. */
struct skl_dpll_regs {
	i915_reg_t ctl, cfgcr1, cfgcr2;
};
1338 
/* this array is indexed by the *shared* pll id */
/* note the control registers reuse the HSW-era LCPLL/WRPLL names */
static const struct skl_dpll_regs skl_dpll_regs[4] = {
	{
		/* DPLL 0 */
		.ctl = LCPLL1_CTL,
		/* DPLL 0 doesn't support HDMI mode */
	},
	{
		/* DPLL 1 */
		.ctl = LCPLL2_CTL,
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
	},
	{
		/* DPLL 2 */
		.ctl = WRPLL_CTL(0),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
	},
	{
		/* DPLL 3 */
		.ctl = WRPLL_CTL(1),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
	},
};
1365 
/*
 * Update this PLL's portion of the shared DPLL_CTRL1 register.
 * hw_state->ctrl1 is stored in DPLL0-relative form (see the SKL
 * compute functions), so shift it into place: each PLL owns a 6-bit
 * field at id * 6.
 */
static void skl_ddi_pll_write_ctrl1(struct intel_display *display,
				    struct intel_dpll *pll,
				    const struct skl_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_rmw(display, DPLL_CTRL1,
		     DPLL_CTRL1_HDMI_MODE(id) |
		     DPLL_CTRL1_SSC(id) |
		     DPLL_CTRL1_LINK_RATE_MASK(id),
		     hw_state->ctrl1 << (id * 6));
	intel_de_posting_read(display, DPLL_CTRL1);
}
1379 
/*
 * Enable a SKL DPLL (1-3): program ctrl1/cfgcr1/cfgcr2 first (with
 * posting reads so the writes land before the PLL starts), then set
 * the enable bit and wait up to 5 ms for lock.
 */
static void skl_ddi_pll_enable(struct intel_display *display,
			       struct intel_dpll *pll,
			       const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	skl_ddi_pll_write_ctrl1(display, pll, hw_state);

	intel_de_write(display, regs[id].cfgcr1, hw_state->cfgcr1);
	intel_de_write(display, regs[id].cfgcr2, hw_state->cfgcr2);
	intel_de_posting_read(display, regs[id].cfgcr1);
	intel_de_posting_read(display, regs[id].cfgcr2);

	/* the enable bit is always bit 31 */
	intel_de_rmw(display, regs[id].ctl, 0, LCPLL_PLL_ENABLE);

	if (intel_de_wait_for_set_ms(display, DPLL_STATUS, DPLL_LOCK(id), 5))
		drm_err(display->drm, "DPLL %d not locked\n", id);
}
1401 
/*
 * DPLL0 is always running (it also drives CDCLK), so "enabling" it
 * only updates its mode/link-rate field in DPLL_CTRL1.
 */
static void skl_ddi_dpll0_enable(struct intel_display *display,
				 struct intel_dpll *pll,
				 const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;

	skl_ddi_pll_write_ctrl1(display, pll, hw_state);
}
1410 
/* Disable a SKL DPLL (1-3) by clearing its enable bit. */
static void skl_ddi_pll_disable(struct intel_display *display,
				struct intel_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	/* the enable bit is always bit 31 */
	intel_de_rmw(display, regs[id].ctl, LCPLL_PLL_ENABLE, 0);
	intel_de_posting_read(display, regs[id].ctl);
}
1421 
/* DPLL0 also drives CDCLK and is never turned off; intentionally empty. */
static void skl_ddi_dpll0_disable(struct intel_display *display,
				  struct intel_dpll *pll)
{
}
1426 
/*
 * Read back the hw state of a SKL DPLL (1-3). Returns false if the
 * display core power domain can't be grabbed or the PLL is disabled.
 */
static bool skl_ddi_pll_get_hw_state(struct intel_display *display,
				     struct intel_dpll *pll,
				     struct intel_dpll_hw_state *dpll_hw_state)
{
	struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(display,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(display, regs[id].ctl);
	if (!(val & LCPLL_PLL_ENABLE))
		goto out;

	/* each PLL owns a 6-bit field in the shared DPLL_CTRL1 register */
	val = intel_de_read(display, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	/* avoid reading back stale values if HDMI mode is not enabled */
	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
		hw_state->cfgcr1 = intel_de_read(display, regs[id].cfgcr1);
		hw_state->cfgcr2 = intel_de_read(display, regs[id].cfgcr2);
	}
	ret = true;

out:
	intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1464 
/*
 * Read back DPLL0's hw state. Unlike DPLLs 1-3, DPLL0 is expected to
 * always be enabled, and it has no HDMI mode so the cfgcr registers
 * are not read.
 */
static bool skl_ddi_dpll0_get_hw_state(struct intel_display *display,
				       struct intel_dpll *pll,
				       struct intel_dpll_hw_state *dpll_hw_state)
{
	struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(display,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	/* DPLL0 is always enabled since it drives CDCLK */
	val = intel_de_read(display, regs[id].ctl);
	if (drm_WARN_ON(display->drm, !(val & LCPLL_PLL_ENABLE)))
		goto out;

	/* each PLL owns a 6-bit field in the shared DPLL_CTRL1 register */
	val = intel_de_read(display, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	ret = true;

out:
	intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1498 
/*
 * Running best-candidate state for the WRPLL divider search; updated
 * by skl_wrpll_try_divider().
 */
struct skl_wrpll_context {
	u64 min_deviation;		/* current minimal deviation */
	u64 central_freq;		/* chosen central freq */
	u64 dco_freq;			/* chosen dco freq */
	unsigned int p;			/* chosen divider */
};
1505 
1506 /* DCO freq must be within +1%/-6%  of the DCO central freq */
1507 #define SKL_DCO_MAX_PDEVIATION	100
1508 #define SKL_DCO_MAX_NDEVIATION	600
1509 
1510 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1511 				  u64 central_freq,
1512 				  u64 dco_freq,
1513 				  unsigned int divider)
1514 {
1515 	u64 deviation;
1516 
1517 	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1518 			      central_freq);
1519 
1520 	/* positive deviation */
1521 	if (dco_freq >= central_freq) {
1522 		if (deviation < SKL_DCO_MAX_PDEVIATION &&
1523 		    deviation < ctx->min_deviation) {
1524 			ctx->min_deviation = deviation;
1525 			ctx->central_freq = central_freq;
1526 			ctx->dco_freq = dco_freq;
1527 			ctx->p = divider;
1528 		}
1529 	/* negative deviation */
1530 	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1531 		   deviation < ctx->min_deviation) {
1532 		ctx->min_deviation = deviation;
1533 		ctx->central_freq = central_freq;
1534 		ctx->dco_freq = dco_freq;
1535 		ctx->p = divider;
1536 	}
1537 }
1538 
/*
 * Break the overall divider p into the three hardware multipliers
 * p0 (pdiv), p1 (qdiv) and p2 (kdiv). Unsupported values of p leave
 * the outputs untouched.
 */
static void skl_wrpll_get_multipliers(unsigned int p,
				      unsigned int *p0 /* out */,
				      unsigned int *p1 /* out */,
				      unsigned int *p2 /* out */)
{
	if (p % 2 == 0) {
		/* even dividers */
		unsigned int half = p / 2;

		switch (half) {
		case 1:
		case 2:
		case 3:
		case 5:
			*p0 = 2;
			*p1 = 1;
			*p2 = half;
			break;
		default:
			if (half % 2 == 0) {
				*p0 = 2;
				*p1 = half / 2;
				*p2 = 2;
			} else if (half % 3 == 0) {
				*p0 = 3;
				*p1 = half / 3;
				*p2 = 2;
			} else if (half % 7 == 0) {
				*p0 = 7;
				*p1 = half / 7;
				*p2 = 2;
			}
			break;
		}
		return;
	}

	/* odd dividers: 3, 5, 7, 9, 15, 21, 35 */
	switch (p) {
	case 3:
	case 9:
		*p0 = 3;
		*p1 = 1;
		*p2 = p / 3;
		break;
	case 5:
	case 7:
		*p0 = p;
		*p1 = 1;
		*p2 = 1;
		break;
	case 15:
		*p0 = 3;
		*p1 = 1;
		*p2 = 5;
		break;
	case 21:
		*p0 = 7;
		*p1 = 1;
		*p2 = 3;
		break;
	case 35:
		*p0 = 7;
		*p1 = 1;
		*p2 = 5;
		break;
	}
}
1587 
/*
 * WRPLL dividers in the encoded form programmed into DPLL_CFGCR1
 * (dco_integer/dco_fraction) and DPLL_CFGCR2 (qdiv/kdiv/pdiv/
 * central_freq); filled in by skl_wrpll_params_populate().
 */
struct skl_wrpll_params {
	u32 dco_fraction;
	u32 dco_integer;
	u32 qdiv_ratio;
	u32 qdiv_mode;
	u32 kdiv;
	u32 pdiv;
	u32 central_freq;
};
1597 
/*
 * Translate the chosen multipliers (p0/p1/p2), AFE clock and DCO
 * central frequency into the encoded register fields of
 * DPLL_CFGCR1/DPLL_CFGCR2.
 *
 * afe_clock and central_freq are in Hz; ref_clock is in kHz.
 */
static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
				      u64 afe_clock,
				      int ref_clock,
				      u64 central_freq,
				      u32 p0, u32 p1, u32 p2)
{
	u64 dco_freq;

	/* encode the central frequency selector */
	switch (central_freq) {
	case 9600000000ULL:
		params->central_freq = 0;
		break;
	case 9000000000ULL:
		params->central_freq = 1;
		break;
	case 8400000000ULL:
		params->central_freq = 3;
	}

	/* encode the P0 (pdiv) post divider */
	switch (p0) {
	case 1:
		params->pdiv = 0;
		break;
	case 2:
		params->pdiv = 1;
		break;
	case 3:
		params->pdiv = 2;
		break;
	case 7:
		params->pdiv = 4;
		break;
	default:
		WARN(1, "Incorrect PDiv\n");
	}

	/* encode the P2 (kdiv) post divider */
	switch (p2) {
	case 5:
		params->kdiv = 0;
		break;
	case 2:
		params->kdiv = 1;
		break;
	case 3:
		params->kdiv = 2;
		break;
	case 1:
		params->kdiv = 3;
		break;
	default:
		WARN(1, "Incorrect KDiv\n");
	}

	/* Q divider is a plain ratio; the mode bit is set only when != 1 */
	params->qdiv_ratio = p1;
	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;

	dco_freq = p0 * p1 * p2 * afe_clock;

	/*
	 * Intermediate values are in Hz.
	 * Divide by MHz to match bsepc
	 */
	params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
	/* remaining fractional part of the DCO/ref ratio in 1/0x8000 steps */
	params->dco_fraction =
		div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
}
1665 
/*
 * Search for WRPLL dividers producing the requested port clock (kHz).
 *
 * Every allowed divider is tried against the three DCO central
 * frequencies, keeping the candidate with the smallest deviation (see
 * skl_wrpll_try_divider()); even dividers are preferred over odd
 * ones. Returns 0 on success, -EINVAL if no divider keeps the DCO
 * within the allowed deviation.
 */
static int
skl_ddi_calculate_wrpll(int clock,
			int ref_clock,
			struct skl_wrpll_params *wrpll_params)
{
	static const u64 dco_central_freq[3] = { 8400000000ULL,
						 9000000000ULL,
						 9600000000ULL };
	static const u8 even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
					    24, 28, 30, 32, 36, 40, 42, 44,
					    48, 52, 54, 56, 60, 64, 66, 68,
					    70, 72, 76, 78, 80, 84, 88, 90,
					    92, 96, 98 };
	static const u8 odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
	static const struct {
		const u8 *list;
		int n_dividers;
	} dividers[] = {
		{ even_dividers, ARRAY_SIZE(even_dividers) },
		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
	};
	struct skl_wrpll_context ctx = {
		.min_deviation = U64_MAX,
	};
	unsigned int dco, d, i;
	unsigned int p0, p1, p2;
	u64 afe_clock = (u64)clock * 1000 * 5; /* AFE Clock is 5x Pixel clock, in Hz */

	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
			for (i = 0; i < dividers[d].n_dividers; i++) {
				unsigned int p = dividers[d].list[i];
				u64 dco_freq = p * afe_clock;

				skl_wrpll_try_divider(&ctx,
						      dco_central_freq[dco],
						      dco_freq,
						      p);
				/*
				 * Skip the remaining dividers if we're sure to
				 * have found the definitive divider, we can't
				 * improve a 0 deviation.
				 */
				if (ctx.min_deviation == 0)
					goto skip_remaining_dividers;
			}
		}

skip_remaining_dividers:
		/*
		 * If a solution is found with an even divider, prefer
		 * this one.
		 */
		if (d == 0 && ctx.p)
			break;
	}

	/* ctx.p is only set once a divider within the deviation limits hit */
	if (!ctx.p)
		return -EINVAL;

	/*
	 * gcc incorrectly analyses that these can be used without being
	 * initialized. To be fair, it's hard to guess.
	 */
	p0 = p1 = p2 = 0;
	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
	skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
				  ctx.central_freq, p0, p1, p2);

	return 0;
}
1737 
/*
 * Compute the WRPLL output frequency (port clock in kHz) from the
 * cfgcr1/cfgcr2 register values, reversing skl_ddi_calculate_wrpll().
 * Returns 0 for invalid divider encodings.
 */
static int skl_ddi_wrpll_get_freq(struct intel_display *display,
				  const struct intel_dpll *pll,
				  const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
	int ref_clock = display->dpll.ref_clks.nssc;
	u32 p0, p1, p2, dco_freq;

	p0 = hw_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
	p2 = hw_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;

	/* Q divider only applies when its mode bit is set */
	if (hw_state->cfgcr2 &  DPLL_CFGCR2_QDIV_MODE(1))
		p1 = (hw_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
	else
		p1 = 1;


	switch (p0) {
	case DPLL_CFGCR2_PDIV_1:
		p0 = 1;
		break;
	case DPLL_CFGCR2_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR2_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR2_PDIV_7_INVALID:
		/*
		 * Incorrect ASUS-Z170M BIOS setting, the HW seems to ignore bit#0,
		 * handling it the same way as PDIV_7.
		 */
		drm_dbg_kms(display->drm, "Invalid WRPLL PDIV divider value, fixing it.\n");
		fallthrough;
	case DPLL_CFGCR2_PDIV_7:
		p0 = 7;
		break;
	default:
		MISSING_CASE(p0);
		return 0;
	}

	switch (p2) {
	case DPLL_CFGCR2_KDIV_5:
		p2 = 5;
		break;
	case DPLL_CFGCR2_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR2_KDIV_3:
		p2 = 3;
		break;
	case DPLL_CFGCR2_KDIV_1:
		p2 = 1;
		break;
	default:
		MISSING_CASE(p2);
		return 0;
	}

	/* DCO frequency in kHz: integer part plus 15-bit (1/0x8000) fraction */
	dco_freq = (hw_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
		   ref_clock;

	dco_freq += ((hw_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
		    ref_clock / 0x8000;

	if (drm_WARN_ON(display->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	/* port clock = DCO / dividers / 5 (AFE clock is 5x the port clock) */
	return dco_freq / (p0 * p1 * p2 * 5);
}
1809 
/*
 * Compute the WRPLL state for an HDMI output and update port_clock
 * with the exact frequency the chosen dividers will produce.
 */
static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct skl_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.skl;
	struct skl_wrpll_params wrpll_params = {};
	int ret;

	ret = skl_ddi_calculate_wrpll(crtc_state->port_clock,
				      display->dpll.ref_clks.nssc, &wrpll_params);
	if (ret)
		return ret;

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	hw_state->ctrl1 =
		DPLL_CTRL1_OVERRIDE(0) |
		DPLL_CTRL1_HDMI_MODE(0);

	hw_state->cfgcr1 =
		DPLL_CFGCR1_FREQ_ENABLE |
		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
		wrpll_params.dco_integer;

	hw_state->cfgcr2 =
		DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
		wrpll_params.central_freq;

	/* read the effective frequency back from the register values */
	crtc_state->port_clock = skl_ddi_wrpll_get_freq(display, NULL,
							&crtc_state->dpll_hw_state);

	return 0;
}
1847 
/*
 * Program ctrl1 for a DP/eDP link rate. Only the fixed link rates
 * listed here are handled; any other rate leaves just the override
 * bit set.
 */
static int
skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
	struct skl_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.skl;
	u32 ctrl1;

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
	switch (crtc_state->port_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
		break;
	case 135000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
		break;
	case 270000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
		break;
		/* eDP 1.4 rates */
	case 162000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
		break;
	case 108000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
		break;
	case 216000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
		break;
	}

	hw_state->ctrl1 = ctrl1;

	return 0;
}
1885 
1886 static int skl_ddi_lcpll_get_freq(struct intel_display *display,
1887 				  const struct intel_dpll *pll,
1888 				  const struct intel_dpll_hw_state *dpll_hw_state)
1889 {
1890 	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1891 	int link_clock = 0;
1892 
1893 	switch ((hw_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
1894 		DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
1895 	case DPLL_CTRL1_LINK_RATE_810:
1896 		link_clock = 81000;
1897 		break;
1898 	case DPLL_CTRL1_LINK_RATE_1080:
1899 		link_clock = 108000;
1900 		break;
1901 	case DPLL_CTRL1_LINK_RATE_1350:
1902 		link_clock = 135000;
1903 		break;
1904 	case DPLL_CTRL1_LINK_RATE_1620:
1905 		link_clock = 162000;
1906 		break;
1907 	case DPLL_CTRL1_LINK_RATE_2160:
1908 		link_clock = 216000;
1909 		break;
1910 	case DPLL_CTRL1_LINK_RATE_2700:
1911 		link_clock = 270000;
1912 		break;
1913 	default:
1914 		drm_WARN(display->drm, 1, "Unsupported link rate\n");
1915 		break;
1916 	}
1917 
1918 	return link_clock * 2;
1919 }
1920 
1921 static int skl_compute_dpll(struct intel_atomic_state *state,
1922 			    struct intel_crtc *crtc,
1923 			    struct intel_encoder *encoder)
1924 {
1925 	struct intel_crtc_state *crtc_state =
1926 		intel_atomic_get_new_crtc_state(state, crtc);
1927 
1928 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1929 		return skl_ddi_hdmi_pll_dividers(crtc_state);
1930 	else if (intel_crtc_has_dp_encoder(crtc_state))
1931 		return skl_ddi_dp_set_dpll_hw_state(crtc_state);
1932 	else
1933 		return -EINVAL;
1934 }
1935 
1936 static int skl_get_dpll(struct intel_atomic_state *state,
1937 			struct intel_crtc *crtc,
1938 			struct intel_encoder *encoder)
1939 {
1940 	struct intel_crtc_state *crtc_state =
1941 		intel_atomic_get_new_crtc_state(state, crtc);
1942 	struct intel_dpll *pll;
1943 
1944 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1945 		pll = intel_find_dpll(state, crtc,
1946 				      &crtc_state->dpll_hw_state,
1947 				      BIT(DPLL_ID_SKL_DPLL0));
1948 	else
1949 		pll = intel_find_dpll(state, crtc,
1950 				      &crtc_state->dpll_hw_state,
1951 				      BIT(DPLL_ID_SKL_DPLL3) |
1952 				      BIT(DPLL_ID_SKL_DPLL2) |
1953 				      BIT(DPLL_ID_SKL_DPLL1));
1954 	if (!pll)
1955 		return -EINVAL;
1956 
1957 	intel_reference_dpll(state, crtc,
1958 			     pll, &crtc_state->dpll_hw_state);
1959 
1960 	crtc_state->intel_dpll = pll;
1961 
1962 	return 0;
1963 }
1964 
1965 static int skl_ddi_pll_get_freq(struct intel_display *display,
1966 				const struct intel_dpll *pll,
1967 				const struct intel_dpll_hw_state *dpll_hw_state)
1968 {
1969 	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1970 
1971 	/*
1972 	 * ctrl1 register is already shifted for each pll, just use 0 to get
1973 	 * the internal shift for each field
1974 	 */
1975 	if (hw_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
1976 		return skl_ddi_wrpll_get_freq(display, pll, dpll_hw_state);
1977 	else
1978 		return skl_ddi_lcpll_get_freq(display, pll, dpll_hw_state);
1979 }
1980 
/* The SKL DPLL reference is the (non-SSC) CDCLK reference clock. */
static void skl_update_dpll_ref_clks(struct intel_display *display)
{
	/* No SSC ref */
	display->dpll.ref_clks.nssc = display->cdclk.hw.ref;
}
1986 
1987 static void skl_dump_hw_state(struct drm_printer *p,
1988 			      const struct intel_dpll_hw_state *dpll_hw_state)
1989 {
1990 	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1991 
1992 	drm_printf(p, "dpll_hw_state: ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
1993 		   hw_state->ctrl1, hw_state->cfgcr1, hw_state->cfgcr2);
1994 }
1995 
1996 static bool skl_compare_hw_state(const struct intel_dpll_hw_state *_a,
1997 				 const struct intel_dpll_hw_state *_b)
1998 {
1999 	const struct skl_dpll_hw_state *a = &_a->skl;
2000 	const struct skl_dpll_hw_state *b = &_b->skl;
2001 
2002 	return a->ctrl1 == b->ctrl1 &&
2003 		a->cfgcr1 == b->cfgcr1 &&
2004 		a->cfgcr2 == b->cfgcr2;
2005 }
2006 
/* Hardware vfuncs for SKL DPLLs 1-3. */
static const struct intel_dpll_funcs skl_ddi_pll_funcs = {
	.enable = skl_ddi_pll_enable,
	.disable = skl_ddi_pll_disable,
	.get_hw_state = skl_ddi_pll_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};
2013 
/* Hardware vfuncs for the always-on DPLL0 (also drives CDCLK). */
static const struct intel_dpll_funcs skl_ddi_dpll0_funcs = {
	.enable = skl_ddi_dpll0_enable,
	.disable = skl_ddi_dpll0_disable,
	.get_hw_state = skl_ddi_dpll0_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};
2020 
/* All SKL DPLLs; DPLL0 also drives CDCLK and is marked always-on. */
static const struct dpll_info skl_plls[] = {
	{ .name = "DPLL 0", .funcs = &skl_ddi_dpll0_funcs, .id = DPLL_ID_SKL_DPLL0,
	  .always_on = true, },
	{ .name = "DPLL 1", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL1, },
	{ .name = "DPLL 2", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL2, },
	{ .name = "DPLL 3", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL3, },
	{}
};
2029 
/* Top-level PLL management hooks for SKL. */
static const struct intel_dpll_mgr skl_pll_mgr = {
	.dpll_info = skl_plls,
	.compute_dplls = skl_compute_dpll,
	.get_dplls = skl_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = skl_update_dpll_ref_clks,
	.dump_hw_state = skl_dump_hw_state,
	.compare_hw_state = skl_compare_hw_state,
};
2039 
/*
 * bxt_ddi_pll_enable - program and enable a BXT/GLK port PLL
 * @display: display device
 * @pll: the PLL to enable; the PLL id maps 1:1 to the DDI port
 * @dpll_hw_state: pre-computed register values, see bxt_ddi_set_dpll_hw_state()
 *
 * Programs the PHY PLL dividers and loop filter from @dpll_hw_state,
 * triggers a recalibration, enables the PLL and waits for it to lock,
 * then programs the lane stagger settings. On GLK the PLL power state
 * must additionally be enabled before any programming.
 */
static void bxt_ddi_pll_enable(struct intel_display *display,
			       struct intel_dpll *pll,
			       const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	enum dpio_phy phy = DPIO_PHY0;
	enum dpio_channel ch = DPIO_CH0;
	u32 temp;
	int ret;

	bxt_port_to_phy_channel(display, port, &phy, &ch);

	/* Non-SSC reference */
	intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_REF_SEL);

	/* GLK only: power up the PLL before touching its registers */
	if (display->platform.geminilake) {
		intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port),
			     0, PORT_PLL_POWER_ENABLE);

		ret = intel_de_wait_for_set_us(display,
					       BXT_PORT_PLL_ENABLE(port),
					       PORT_PLL_POWER_STATE, 200);
		if (ret)
			drm_err(display->drm,
				"Power state not set for PLL:%d\n", port);
	}

	/* Disable 10 bit clock */
	intel_de_rmw(display, BXT_PORT_PLL_EBB_4(phy, ch),
		     PORT_PLL_10BIT_CLK_ENABLE, 0);

	/* Write P1 & P2 */
	intel_de_rmw(display, BXT_PORT_PLL_EBB_0(phy, ch),
		     PORT_PLL_P1_MASK | PORT_PLL_P2_MASK, hw_state->ebb0);

	/* Write M2 integer */
	intel_de_rmw(display, BXT_PORT_PLL(phy, ch, 0),
		     PORT_PLL_M2_INT_MASK, hw_state->pll0);

	/* Write N */
	intel_de_rmw(display, BXT_PORT_PLL(phy, ch, 1),
		     PORT_PLL_N_MASK, hw_state->pll1);

	/* Write M2 fraction */
	intel_de_rmw(display, BXT_PORT_PLL(phy, ch, 2),
		     PORT_PLL_M2_FRAC_MASK, hw_state->pll2);

	/* Write M2 fraction enable */
	intel_de_rmw(display, BXT_PORT_PLL(phy, ch, 3),
		     PORT_PLL_M2_FRAC_ENABLE, hw_state->pll3);

	/* Write coeff */
	temp = intel_de_read(display, BXT_PORT_PLL(phy, ch, 6));
	temp &= ~PORT_PLL_PROP_COEFF_MASK;
	temp &= ~PORT_PLL_INT_COEFF_MASK;
	temp &= ~PORT_PLL_GAIN_CTL_MASK;
	temp |= hw_state->pll6;
	intel_de_write(display, BXT_PORT_PLL(phy, ch, 6), temp);

	/* Write calibration val */
	intel_de_rmw(display, BXT_PORT_PLL(phy, ch, 8),
		     PORT_PLL_TARGET_CNT_MASK, hw_state->pll8);

	intel_de_rmw(display, BXT_PORT_PLL(phy, ch, 9),
		     PORT_PLL_LOCK_THRESHOLD_MASK, hw_state->pll9);

	/* DCO amplitude override and value */
	temp = intel_de_read(display, BXT_PORT_PLL(phy, ch, 10));
	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
	temp &= ~PORT_PLL_DCO_AMP_MASK;
	temp |= hw_state->pll10;
	intel_de_write(display, BXT_PORT_PLL(phy, ch, 10), temp);

	/* Recalibrate with new settings */
	temp = intel_de_read(display, BXT_PORT_PLL_EBB_4(phy, ch));
	temp |= PORT_PLL_RECALIBRATE;
	intel_de_write(display, BXT_PORT_PLL_EBB_4(phy, ch), temp);
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	temp |= hw_state->ebb4;
	intel_de_write(display, BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Enable PLL */
	intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_ENABLE);
	intel_de_posting_read(display, BXT_PORT_PLL_ENABLE(port));

	ret = intel_de_wait_for_set_us(display, BXT_PORT_PLL_ENABLE(port),
				       PORT_PLL_LOCK, 200);
	if (ret)
		drm_err(display->drm, "PLL %d not locked\n", port);

	/*
	 * GLK only: widen the DCC delay range. The lane 0 register is
	 * read back and the result written to all lanes via the group
	 * register. Presumably a GLK-specific tuning — TODO confirm.
	 */
	if (display->platform.geminilake) {
		temp = intel_de_read(display, BXT_PORT_TX_DW5_LN(phy, ch, 0));
		temp |= DCC_DELAY_RANGE_2;
		intel_de_write(display, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
	}

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers and we pick lanes 0/1 for that.
	 */
	temp = intel_de_read(display, BXT_PORT_PCS_DW12_LN01(phy, ch));
	temp &= ~LANE_STAGGER_MASK;
	temp &= ~LANESTAGGER_STRAP_OVRD;
	temp |= hw_state->pcsdw12;
	intel_de_write(display, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
}
2146 
/*
 * bxt_ddi_pll_disable - disable a BXT/GLK port PLL
 * @display: display device
 * @pll: the PLL to disable; the PLL id maps 1:1 to the DDI port
 *
 * Disables the PLL, and on GLK additionally powers it down and waits
 * for the power state to clear.
 */
static void bxt_ddi_pll_disable(struct intel_display *display,
				struct intel_dpll *pll)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	int ret;

	intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port), PORT_PLL_ENABLE, 0);
	intel_de_posting_read(display, BXT_PORT_PLL_ENABLE(port));

	/* GLK only: power the PLL back down, mirroring the enable path */
	if (display->platform.geminilake) {
		intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port),
			     PORT_PLL_POWER_ENABLE, 0);

		ret = intel_de_wait_for_clear_us(display,
						 BXT_PORT_PLL_ENABLE(port),
						 PORT_PLL_POWER_STATE, 200);
		if (ret)
			drm_err(display->drm,
				"Power state not reset for PLL:%d\n", port);
	}
}
2168 
/*
 * bxt_ddi_pll_get_hw_state - read the current BXT/GLK port PLL state
 * @display: display device
 * @pll: the PLL to read out; the PLL id maps 1:1 to the DDI port
 * @dpll_hw_state: filled with the current register values, masked down
 *	to the bits bxt_ddi_set_dpll_hw_state() programs
 *
 * Returns true if the PLL is enabled and @dpll_hw_state was populated,
 * false otherwise (PLL disabled or display core power not available).
 */
static bool bxt_ddi_pll_get_hw_state(struct intel_display *display,
				     struct intel_dpll *pll,
				     struct intel_dpll_hw_state *dpll_hw_state)
{
	struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	intel_wakeref_t wakeref;
	enum dpio_phy phy;
	enum dpio_channel ch;
	u32 val;
	bool ret;

	bxt_port_to_phy_channel(display, port, &phy, &ch);

	/* Registers can only be read with the display core powered up */
	wakeref = intel_display_power_get_if_enabled(display,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(display, BXT_PORT_PLL_ENABLE(port));
	if (!(val & PORT_PLL_ENABLE))
		goto out;

	hw_state->ebb0 = intel_de_read(display, BXT_PORT_PLL_EBB_0(phy, ch));
	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;

	hw_state->ebb4 = intel_de_read(display, BXT_PORT_PLL_EBB_4(phy, ch));
	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;

	hw_state->pll0 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 0));
	hw_state->pll0 &= PORT_PLL_M2_INT_MASK;

	hw_state->pll1 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 1));
	hw_state->pll1 &= PORT_PLL_N_MASK;

	hw_state->pll2 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 2));
	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;

	hw_state->pll3 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 3));
	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;

	hw_state->pll6 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 6));
	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
			  PORT_PLL_INT_COEFF_MASK |
			  PORT_PLL_GAIN_CTL_MASK;

	hw_state->pll8 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 8));
	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;

	hw_state->pll9 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 9));
	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;

	hw_state->pll10 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 10));
	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
			   PORT_PLL_DCO_AMP_MASK;

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers. We configure all lanes the same way, so
	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
	 */
	hw_state->pcsdw12 = intel_de_read(display,
					  BXT_PORT_PCS_DW12_LN01(phy, ch));
	if (intel_de_read(display, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
		drm_dbg(display->drm,
			"lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
			hw_state->pcsdw12,
			intel_de_read(display,
				      BXT_PORT_PCS_DW12_LN23(phy, ch)));
	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;

	ret = true;

out:
	intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
2249 
/*
 * Pre-calculated values for DP linkrates; .dot is matched against
 * crtc_state->port_clock (kHz) in bxt_ddi_dp_pll_dividers().
 */
static const struct dpll bxt_dp_clk_val[] = {
	/* m2 is .22 binary fixed point */
	{ .dot = 162000, .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
	{ .dot = 270000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
	{ .dot = 540000, .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
	{ .dot = 216000, .p1 = 3, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
	{ .dot = 243000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6133333 /* 24.3 */ },
	{ .dot = 324000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
	{ .dot = 432000, .p1 = 3, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
};
2261 
2262 static int
2263 bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
2264 			  struct dpll *clk_div)
2265 {
2266 	struct intel_display *display = to_intel_display(crtc_state);
2267 
2268 	/* Calculate HDMI div */
2269 	/*
2270 	 * FIXME: tie the following calculation into
2271 	 * i9xx_crtc_compute_clock
2272 	 */
2273 	if (!bxt_find_best_dpll(crtc_state, clk_div))
2274 		return -EINVAL;
2275 
2276 	drm_WARN_ON(display->drm, clk_div->m1 != 2);
2277 
2278 	return 0;
2279 }
2280 
2281 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
2282 				    struct dpll *clk_div)
2283 {
2284 	struct intel_display *display = to_intel_display(crtc_state);
2285 	int i;
2286 
2287 	*clk_div = bxt_dp_clk_val[0];
2288 	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
2289 		if (crtc_state->port_clock == bxt_dp_clk_val[i].dot) {
2290 			*clk_div = bxt_dp_clk_val[i];
2291 			break;
2292 		}
2293 	}
2294 
2295 	chv_calc_dpll_params(display->dpll.ref_clks.nssc, clk_div);
2296 
2297 	drm_WARN_ON(display->drm, clk_div->vco == 0 ||
2298 		    clk_div->dot != crtc_state->port_clock);
2299 }
2300 
/*
 * bxt_ddi_set_dpll_hw_state - encode PLL dividers into BXT register values
 * @crtc_state: crtc state; dpll_hw_state.bxt is filled in
 * @clk_div: logical divider values (p1/p2/n/m2 and derived vco)
 *
 * Selects the loop filter coefficients and target count by VCO band and
 * the lane stagger value by port clock band, then packs everything into
 * the raw register contents later programmed by bxt_ddi_pll_enable().
 * Returns 0 on success, -EINVAL for a VCO outside the supported ranges.
 */
static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
				     const struct dpll *clk_div)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct bxt_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.bxt;
	int clock = crtc_state->port_clock;
	int vco = clk_div->vco;
	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
	u32 lanestagger;

	/*
	 * Loop filter tuning per VCO band; vco == 5400000 gets its own
	 * settings below. Values presumably from BSpec — TODO confirm.
	 */
	if (vco >= 6200000 && vco <= 6700000) {
		prop_coef = 4;
		int_coef = 9;
		gain_ctl = 3;
		targ_cnt = 8;
	} else if ((vco > 5400000 && vco < 6200000) ||
			(vco >= 4800000 && vco < 5400000)) {
		prop_coef = 5;
		int_coef = 11;
		gain_ctl = 3;
		targ_cnt = 9;
	} else if (vco == 5400000) {
		prop_coef = 3;
		int_coef = 8;
		gain_ctl = 1;
		targ_cnt = 9;
	} else {
		drm_err(display->drm, "Invalid VCO\n");
		return -EINVAL;
	}

	/* Lane stagger value selected by port clock band */
	if (clock > 270000)
		lanestagger = 0x18;
	else if (clock > 135000)
		lanestagger = 0x0d;
	else if (clock > 67000)
		lanestagger = 0x07;
	else if (clock > 33000)
		lanestagger = 0x04;
	else
		lanestagger = 0x02;

	/* m2 is a .22 binary fixed point number: split integer/fraction */
	hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
	hw_state->pll0 = PORT_PLL_M2_INT(clk_div->m2 >> 22);
	hw_state->pll1 = PORT_PLL_N(clk_div->n);
	hw_state->pll2 = PORT_PLL_M2_FRAC(clk_div->m2 & 0x3fffff);

	if (clk_div->m2 & 0x3fffff)
		hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;

	hw_state->pll6 = PORT_PLL_PROP_COEFF(prop_coef) |
		PORT_PLL_INT_COEFF(int_coef) |
		PORT_PLL_GAIN_CTL(gain_ctl);

	hw_state->pll8 = PORT_PLL_TARGET_CNT(targ_cnt);

	hw_state->pll9 = PORT_PLL_LOCK_THRESHOLD(5);

	hw_state->pll10 = PORT_PLL_DCO_AMP(15) |
		PORT_PLL_DCO_AMP_OVR_EN_H;

	hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;

	hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;

	return 0;
}
2368 
/*
 * bxt_ddi_pll_get_freq - calculate the port clock from BXT PLL state
 *
 * Reconstructs the logical dividers from the raw register values
 * (m2 is a .22 binary fixed point number, fraction only valid when
 * enabled in pll3) and lets chv_calc_dpll_params() derive the clock.
 */
static int bxt_ddi_pll_get_freq(struct intel_display *display,
				const struct intel_dpll *pll,
				const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
	struct dpll clock;

	clock.m1 = 2;
	clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, hw_state->pll0) << 22;
	if (hw_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
		clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK,
					  hw_state->pll2);
	clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, hw_state->pll1);
	clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, hw_state->ebb0);
	clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, hw_state->ebb0);

	return chv_calc_dpll_params(display->dpll.ref_clks.nssc, &clock);
}
2387 
2388 static int
2389 bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2390 {
2391 	struct dpll clk_div = {};
2392 
2393 	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
2394 
2395 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2396 }
2397 
2398 static int
2399 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2400 {
2401 	struct intel_display *display = to_intel_display(crtc_state);
2402 	struct dpll clk_div = {};
2403 	int ret;
2404 
2405 	bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
2406 
2407 	ret = bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2408 	if (ret)
2409 		return ret;
2410 
2411 	crtc_state->port_clock = bxt_ddi_pll_get_freq(display, NULL,
2412 						      &crtc_state->dpll_hw_state);
2413 
2414 	return 0;
2415 }
2416 
2417 static int bxt_compute_dpll(struct intel_atomic_state *state,
2418 			    struct intel_crtc *crtc,
2419 			    struct intel_encoder *encoder)
2420 {
2421 	struct intel_crtc_state *crtc_state =
2422 		intel_atomic_get_new_crtc_state(state, crtc);
2423 
2424 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
2425 		return bxt_ddi_hdmi_set_dpll_hw_state(crtc_state);
2426 	else if (intel_crtc_has_dp_encoder(crtc_state))
2427 		return bxt_ddi_dp_set_dpll_hw_state(crtc_state);
2428 	else
2429 		return -EINVAL;
2430 }
2431 
2432 static int bxt_get_dpll(struct intel_atomic_state *state,
2433 			struct intel_crtc *crtc,
2434 			struct intel_encoder *encoder)
2435 {
2436 	struct intel_display *display = to_intel_display(state);
2437 	struct intel_crtc_state *crtc_state =
2438 		intel_atomic_get_new_crtc_state(state, crtc);
2439 	struct intel_dpll *pll;
2440 	enum intel_dpll_id id;
2441 
2442 	/* 1:1 mapping between ports and PLLs */
2443 	id = (enum intel_dpll_id) encoder->port;
2444 	pll = intel_get_dpll_by_id(display, id);
2445 
2446 	drm_dbg_kms(display->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
2447 		    crtc->base.base.id, crtc->base.name, pll->info->name);
2448 
2449 	intel_reference_dpll(state, crtc,
2450 			     pll, &crtc_state->dpll_hw_state);
2451 
2452 	crtc_state->intel_dpll = pll;
2453 
2454 	return 0;
2455 }
2456 
static void bxt_update_dpll_ref_clks(struct intel_display *display)
{
	/* BXT/GLK port PLLs use a fixed 100 MHz reference */
	display->dpll.ref_clks.ssc = 100000;
	display->dpll.ref_clks.nssc = 100000;
	/* DSI non-SSC ref 19.2MHz */
}
2463 
2464 static void bxt_dump_hw_state(struct drm_printer *p,
2465 			      const struct intel_dpll_hw_state *dpll_hw_state)
2466 {
2467 	const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
2468 
2469 	drm_printf(p, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
2470 		   "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
2471 		   "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
2472 		   hw_state->ebb0, hw_state->ebb4,
2473 		   hw_state->pll0, hw_state->pll1, hw_state->pll2, hw_state->pll3,
2474 		   hw_state->pll6, hw_state->pll8, hw_state->pll9, hw_state->pll10,
2475 		   hw_state->pcsdw12);
2476 }
2477 
2478 static bool bxt_compare_hw_state(const struct intel_dpll_hw_state *_a,
2479 				 const struct intel_dpll_hw_state *_b)
2480 {
2481 	const struct bxt_dpll_hw_state *a = &_a->bxt;
2482 	const struct bxt_dpll_hw_state *b = &_b->bxt;
2483 
2484 	return a->ebb0 == b->ebb0 &&
2485 		a->ebb4 == b->ebb4 &&
2486 		a->pll0 == b->pll0 &&
2487 		a->pll1 == b->pll1 &&
2488 		a->pll2 == b->pll2 &&
2489 		a->pll3 == b->pll3 &&
2490 		a->pll6 == b->pll6 &&
2491 		a->pll8 == b->pll8 &&
2492 		a->pll10 == b->pll10 &&
2493 		a->pcsdw12 == b->pcsdw12;
2494 }
2495 
/* PLL hooks shared by all BXT/GLK port PLLs */
static const struct intel_dpll_funcs bxt_ddi_pll_funcs = {
	.enable = bxt_ddi_pll_enable,
	.disable = bxt_ddi_pll_disable,
	.get_hw_state = bxt_ddi_pll_get_hw_state,
	.get_freq = bxt_ddi_pll_get_freq,
};
2502 
/* One PLL per DDI port (A-C), hence the 1:1 port->PLL id mapping */
static const struct dpll_info bxt_plls[] = {
	{ .name = "PORT PLL A", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL0, },
	{ .name = "PORT PLL B", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL1, },
	{ .name = "PORT PLL C", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL2, },
	{}
};
2509 
/* Top-level DPLL manager for BXT/GLK */
static const struct intel_dpll_mgr bxt_pll_mgr = {
	.dpll_info = bxt_plls,
	.compute_dplls = bxt_compute_dpll,
	.get_dplls = bxt_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = bxt_update_dpll_ref_clks,
	.dump_hw_state = bxt_dump_hw_state,
	.compare_hw_state = bxt_compare_hw_state,
};
2519 
/*
 * Split an overall divider into the P, Q and K factors supported by the
 * hardware. @bestdiv comes from the candidate list in icl_calc_wrpll().
 */
static void icl_wrpll_get_multipliers(int bestdiv, int *pdiv,
				      int *qdiv, int *kdiv)
{
	/* odd dividers */
	if (bestdiv % 2 != 0) {
		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
			*pdiv = bestdiv;
			*qdiv = 1;
			*kdiv = 1;
		} else { /* 9, 15, 21 */
			*pdiv = bestdiv / 3;
			*qdiv = 1;
			*kdiv = 3;
		}
		return;
	}

	/* even dividers */
	if (bestdiv == 2) {
		*pdiv = 2;
		*qdiv = 1;
		*kdiv = 1;
	} else if (bestdiv % 4 == 0) {
		*pdiv = 2;
		*qdiv = bestdiv / 4;
		*kdiv = 2;
	} else if (bestdiv % 6 == 0) {
		*pdiv = 3;
		*qdiv = bestdiv / 6;
		*kdiv = 2;
	} else if (bestdiv % 5 == 0) {
		*pdiv = 5;
		*qdiv = bestdiv / 10;
		*kdiv = 2;
	} else if (bestdiv % 14 == 0) {
		*pdiv = 7;
		*qdiv = bestdiv / 14;
		*kdiv = 2;
	}
}
2558 
/*
 * icl_wrpll_params_populate - encode WRPLL dividers into register fields
 * @params: output register-level parameters
 * @dco_freq: target DCO frequency in kHz
 * @ref_freq: reference clock in kHz
 * @pdiv: logical P divider (2, 3, 5 or 7)
 * @qdiv: logical Q divider
 * @kdiv: logical K divider (1, 2 or 3)
 *
 * Translates the logical divider values to their register encodings and
 * stores the DCO ratio as a 15.15 fixed-point integer/fraction pair.
 */
static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
				      u32 dco_freq, u32 ref_freq,
				      int pdiv, int qdiv, int kdiv)
{
	u32 dco;

	/* K divider register encoding: 1->1, 2->2, 3->4 */
	switch (kdiv) {
	case 1:
		params->kdiv = 1;
		break;
	case 2:
		params->kdiv = 2;
		break;
	case 3:
		params->kdiv = 4;
		break;
	default:
		WARN(1, "Incorrect KDiv\n");
	}

	/* P divider register encoding: 2->1, 3->2, 5->4, 7->8 */
	switch (pdiv) {
	case 2:
		params->pdiv = 1;
		break;
	case 3:
		params->pdiv = 2;
		break;
	case 5:
		params->pdiv = 4;
		break;
	case 7:
		params->pdiv = 8;
		break;
	default:
		WARN(1, "Incorrect PDiv\n");
	}

	/* A Q divider other than 1 is only valid with kdiv == 2 */
	WARN_ON(kdiv != 2 && qdiv != 1);

	params->qdiv_ratio = qdiv;
	params->qdiv_mode = (qdiv == 1) ? 0 : 1;

	/* DCO ratio in 15.15 fixed point */
	dco = div_u64((u64)dco_freq << 15, ref_freq);

	params->dco_integer = dco >> 15;
	params->dco_fraction = dco & 0x7fff;
}
2606 
2607 /*
2608  * Display WA #22010492432: ehl, tgl, adl-s, adl-p
2609  * Program half of the nominal DCO divider fraction value.
2610  */
2611 static bool
2612 ehl_combo_pll_div_frac_wa_needed(struct intel_display *display)
2613 {
2614 	return ((display->platform.elkhartlake &&
2615 		 IS_DISPLAY_STEP(display, STEP_B0, STEP_FOREVER)) ||
2616 		DISPLAY_VER(display) >= 12) &&
2617 		display->dpll.ref_clks.nssc == 38400;
2618 }
2619 
/* Pairing of a DP link rate with its pre-computed WRPLL parameters */
struct icl_combo_pll_params {
	int clock;	/* port clock in kHz, matched against crtc_state->port_clock */
	struct skl_wrpll_params wrpll;
};
2624 
2625 /*
2626  * These values alrea already adjusted: they're the bits we write to the
2627  * registers, not the logical values.
2628  */
2629 static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
2630 	{ 540000,
2631 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
2632 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2633 	{ 270000,
2634 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
2635 	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2636 	{ 162000,
2637 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
2638 	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2639 	{ 324000,
2640 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
2641 	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2642 	{ 216000,
2643 	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2644 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2645 	{ 432000,
2646 	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2647 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2648 	{ 648000,
2649 	  { .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
2650 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2651 	{ 810000,
2652 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
2653 	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2654 };
2655 
2656 
2657 /* Also used for 38.4 MHz values. */
2658 static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
2659 	{ 540000,
2660 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
2661 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2662 	{ 270000,
2663 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
2664 	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2665 	{ 162000,
2666 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
2667 	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2668 	{ 324000,
2669 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
2670 	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2671 	{ 216000,
2672 	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2673 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2674 	{ 432000,
2675 	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2676 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2677 	{ 648000,
2678 	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
2679 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2680 	{ 810000,
2681 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
2682 	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2683 };
2684 
/* ICL TBT PLL parameters for a 24 MHz reference */
static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
	.dco_integer = 0x151, .dco_fraction = 0x4000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};

/* ICL TBT PLL parameters for a 19.2 (or 38.4) MHz reference */
static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};

/* TGL+ TBT PLL parameters for a 19.2 (or 38.4) MHz reference */
static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x54, .dco_fraction = 0x3000,
	/* the following params are unused */
	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
};

/* TGL+ TBT PLL parameters for a 24 MHz reference */
static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
	.dco_integer = 0x43, .dco_fraction = 0x4000,
	/* the following params are unused */
};
2705 
2706 static int icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2707 				 struct skl_wrpll_params *pll_params)
2708 {
2709 	struct intel_display *display = to_intel_display(crtc_state);
2710 	const struct icl_combo_pll_params *params =
2711 		display->dpll.ref_clks.nssc == 24000 ?
2712 		icl_dp_combo_pll_24MHz_values :
2713 		icl_dp_combo_pll_19_2MHz_values;
2714 	int clock = crtc_state->port_clock;
2715 	int i;
2716 
2717 	for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
2718 		if (clock == params[i].clock) {
2719 			*pll_params = params[i].wrpll;
2720 			return 0;
2721 		}
2722 	}
2723 
2724 	MISSING_CASE(clock);
2725 	return -EINVAL;
2726 }
2727 
2728 static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
2729 			    struct skl_wrpll_params *pll_params)
2730 {
2731 	struct intel_display *display = to_intel_display(crtc_state);
2732 
2733 	if (DISPLAY_VER(display) >= 12) {
2734 		switch (display->dpll.ref_clks.nssc) {
2735 		default:
2736 			MISSING_CASE(display->dpll.ref_clks.nssc);
2737 			fallthrough;
2738 		case 19200:
2739 		case 38400:
2740 			*pll_params = tgl_tbt_pll_19_2MHz_values;
2741 			break;
2742 		case 24000:
2743 			*pll_params = tgl_tbt_pll_24MHz_values;
2744 			break;
2745 		}
2746 	} else {
2747 		switch (display->dpll.ref_clks.nssc) {
2748 		default:
2749 			MISSING_CASE(display->dpll.ref_clks.nssc);
2750 			fallthrough;
2751 		case 19200:
2752 		case 38400:
2753 			*pll_params = icl_tbt_pll_19_2MHz_values;
2754 			break;
2755 		case 24000:
2756 			*pll_params = icl_tbt_pll_24MHz_values;
2757 			break;
2758 		}
2759 	}
2760 
2761 	return 0;
2762 }
2763 
/* The TBT PLL has no single output frequency; warn if anyone asks */
static int icl_ddi_tbt_pll_get_freq(struct intel_display *display,
				    const struct intel_dpll *pll,
				    const struct intel_dpll_hw_state *dpll_hw_state)
{
	/*
	 * The PLL outputs multiple frequencies at the same time, selection is
	 * made at DDI clock mux level.
	 */
	drm_WARN_ON(display->drm, 1);

	return 0;
}
2776 
2777 static int icl_wrpll_ref_clock(struct intel_display *display)
2778 {
2779 	int ref_clock = display->dpll.ref_clks.nssc;
2780 
2781 	/*
2782 	 * For ICL+, the spec states: if reference frequency is 38.4,
2783 	 * use 19.2 because the DPLL automatically divides that by 2.
2784 	 */
2785 	if (ref_clock == 38400)
2786 		ref_clock = 19200;
2787 
2788 	return ref_clock;
2789 }
2790 
/*
 * icl_calc_wrpll - compute combo PHY WRPLL parameters (non-DP path)
 * @crtc_state: crtc state providing the port clock
 * @wrpll_params: output register-level parameters
 *
 * Picks the candidate divider that puts the DCO frequency closest to
 * the middle of the valid [7998000, 10000000] kHz range, splits it into
 * P/Q/K factors and encodes everything via icl_wrpll_params_populate().
 * The divider list order matters: ties are broken by the strict '<'
 * comparison, so the first best candidate wins.
 * Returns 0 on success, -EINVAL if no divider lands in the DCO range.
 */
static int
icl_calc_wrpll(struct intel_crtc_state *crtc_state,
	       struct skl_wrpll_params *wrpll_params)
{
	struct intel_display *display = to_intel_display(crtc_state);
	int ref_clock = icl_wrpll_ref_clock(display);
	u32 afe_clock = crtc_state->port_clock * 5;
	u32 dco_min = 7998000;
	u32 dco_max = 10000000;
	u32 dco_mid = (dco_min + dco_max) / 2;
	static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
					 18, 20, 24, 28, 30, 32,  36,  40,
					 42, 44, 48, 50, 52, 54,  56,  60,
					 64, 66, 68, 70, 72, 76,  78,  80,
					 84, 88, 90, 92, 96, 98, 100, 102,
					  3,  5,  7,  9, 15, 21 };
	u32 dco, best_dco = 0, dco_centrality = 0;
	u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;

	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
		dco = afe_clock * dividers[d];

		if (dco <= dco_max && dco >= dco_min) {
			dco_centrality = abs(dco - dco_mid);

			if (dco_centrality < best_dco_centrality) {
				best_dco_centrality = dco_centrality;
				best_div = dividers[d];
				best_dco = dco;
			}
		}
	}

	if (best_div == 0)
		return -EINVAL;

	icl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
	icl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
				  pdiv, qdiv, kdiv);

	return 0;
}
2834 
/*
 * icl_ddi_combo_pll_get_freq - calculate a combo PHY PLL's port clock
 *
 * Decodes the P/Q/K dividers and the DCO integer/fraction from the
 * CFGCR register values and returns the resulting port clock in kHz.
 */
static int icl_ddi_combo_pll_get_freq(struct intel_display *display,
				      const struct intel_dpll *pll,
				      const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
	int ref_clock = icl_wrpll_ref_clock(display);
	u32 dco_fraction;
	u32 p0, p1, p2, dco_freq;

	p0 = hw_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
	p2 = hw_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;

	/* Q divider is bypassed (== 1) unless qdiv_mode is set */
	if (hw_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
		p1 = (hw_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
			DPLL_CFGCR1_QDIV_RATIO_SHIFT;
	else
		p1 = 1;

	/* Translate the P divider register encoding to its logical value */
	switch (p0) {
	case DPLL_CFGCR1_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR1_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR1_PDIV_5:
		p0 = 5;
		break;
	case DPLL_CFGCR1_PDIV_7:
		p0 = 7;
		break;
	}

	/* Translate the K divider register encoding to its logical value */
	switch (p2) {
	case DPLL_CFGCR1_KDIV_1:
		p2 = 1;
		break;
	case DPLL_CFGCR1_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR1_KDIV_3:
		p2 = 3;
		break;
	}

	dco_freq = (hw_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
		   ref_clock;

	dco_fraction = (hw_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
		       DPLL_CFGCR0_DCO_FRACTION_SHIFT;

	/* Undo Display WA #22010492432: half the fraction was programmed */
	if (ehl_combo_pll_div_frac_wa_needed(display))
		dco_fraction *= 2;

	/* The fraction is in units of ref_clock / 2^15 */
	dco_freq += (dco_fraction * ref_clock) / 0x8000;

	/* An unrecognized register encoding above would leave a divider 0 */
	if (drm_WARN_ON(display->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	/* Undo the post dividers and the 5x AFE clock factor */
	return dco_freq / (p0 * p1 * p2 * 5);
}
2896 
/*
 * icl_calc_dpll_state - convert WRPLL parameters to CFGCR register values
 *
 * Applies Display WA #22010492432 by programming half the nominal DCO
 * fraction where needed; the readout side doubles it back, see
 * icl_ddi_combo_pll_get_freq().
 */
static void icl_calc_dpll_state(struct intel_display *display,
				const struct skl_wrpll_params *pll_params,
				struct intel_dpll_hw_state *dpll_hw_state)
{
	struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
	u32 dco_fraction = pll_params->dco_fraction;

	if (ehl_combo_pll_div_frac_wa_needed(display))
		dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);

	hw_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
			    pll_params->dco_integer;

	hw_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
			    DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
			    DPLL_CFGCR1_KDIV(pll_params->kdiv) |
			    DPLL_CFGCR1_PDIV(pll_params->pdiv);

	/* The central frequency select field differs between gen11 and 12+ */
	if (DISPLAY_VER(display) >= 12)
		hw_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
	else
		hw_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;

	/* VBT may override the AFC startup value */
	if (display->vbt.override_afc_startup)
		hw_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(display->vbt.override_afc_startup_val);
}
2923 
/*
 * icl_mg_pll_find_divisors - find HSDIV/DSDIV settings for an MG/DKL PLL
 * @clock_khz: port clock in kHz
 * @is_dp: DP output (fixed 8100000 kHz DCO) vs HDMI
 * @use_ssc: whether SSC is used; affects the minimum HDMI DCO
 * @target_dco_khz: returned selected DCO frequency in kHz
 * @hw_state: the refclkin/clktop2 fields are filled in here
 * @is_dkl: DKL PHY (display ver >= 12) vs MG PHY
 *
 * Tries div1 in {7, 5, 3, 2} and div2 from 10 down to 1 until
 * div1 * div2 * clock * 5 lands inside the valid DCO range.
 * Returns 0 on success, -EINVAL if no divider combination fits.
 */
static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
				    u32 *target_dco_khz,
				    struct icl_dpll_hw_state *hw_state,
				    bool is_dkl)
{
	static const u8 div1_vals[] = { 7, 5, 3, 2 };
	u32 dco_min_freq, dco_max_freq;
	unsigned int i;
	int div2;

	/* DP requires exactly an 8.1 GHz DCO; HDMI allows a range */
	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
	dco_max_freq = is_dp ? 8100000 : 10000000;

	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
		int div1 = div1_vals[i];

		for (div2 = 10; div2 > 0; div2--) {
			int dco = div1 * div2 * clock_khz * 5;
			int a_divratio, tlinedrv, inputsel;
			u32 hsdiv;

			if (dco < dco_min_freq || dco > dco_max_freq)
				continue;

			if (div2 >= 2) {
				/*
				 * Note: a_divratio not matching TGL BSpec
				 * algorithm but matching hardcoded values and
				 * working on HW for DP alt-mode at least
				 */
				a_divratio = is_dp ? 10 : 5;
				tlinedrv = is_dkl ? 1 : 2;
			} else {
				a_divratio = 5;
				tlinedrv = 0;
			}
			inputsel = is_dp ? 0 : 1;

			/* Translate div1 to its HSDIV register encoding */
			switch (div1) {
			default:
				MISSING_CASE(div1);
				fallthrough;
			case 2:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
				break;
			case 3:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
				break;
			case 5:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
				break;
			case 7:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
				break;
			}

			*target_dco_khz = dco;

			hw_state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);

			hw_state->mg_clktop2_coreclkctl1 =
				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);

			hw_state->mg_clktop2_hsclkctl =
				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
				hsdiv |
				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);

			return 0;
		}
	}

	return -EINVAL;
}
2999 
3000 /*
3001  * The specification for this function uses real numbers, so the math had to be
3002  * adapted to integer-only calculation, that's why it looks so different.
3003  */
3004 static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
3005 				 struct intel_dpll_hw_state *dpll_hw_state)
3006 {
3007 	struct intel_display *display = to_intel_display(crtc_state);
3008 	struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3009 	int refclk_khz = display->dpll.ref_clks.nssc;
3010 	int clock = crtc_state->port_clock;
3011 	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
3012 	u32 iref_ndiv, iref_trim, iref_pulse_w;
3013 	u32 prop_coeff, int_coeff;
3014 	u32 tdc_targetcnt, feedfwgain;
3015 	u64 ssc_stepsize, ssc_steplen, ssc_steplog;
3016 	u64 tmp;
3017 	bool use_ssc = false;
3018 	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
3019 	bool is_dkl = DISPLAY_VER(display) >= 12;
3020 	int ret;
3021 
3022 	ret = icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
3023 				       hw_state, is_dkl);
3024 	if (ret)
3025 		return ret;
3026 
3027 	m1div = 2;
3028 	m2div_int = dco_khz / (refclk_khz * m1div);
3029 	if (m2div_int > 255) {
3030 		if (!is_dkl) {
3031 			m1div = 4;
3032 			m2div_int = dco_khz / (refclk_khz * m1div);
3033 		}
3034 
3035 		if (m2div_int > 255)
3036 			return -EINVAL;
3037 	}
3038 	m2div_rem = dco_khz % (refclk_khz * m1div);
3039 
3040 	tmp = (u64)m2div_rem * (1 << 22);
3041 	do_div(tmp, refclk_khz * m1div);
3042 	m2div_frac = tmp;
3043 
3044 	switch (refclk_khz) {
3045 	case 19200:
3046 		iref_ndiv = 1;
3047 		iref_trim = 28;
3048 		iref_pulse_w = 1;
3049 		break;
3050 	case 24000:
3051 		iref_ndiv = 1;
3052 		iref_trim = 25;
3053 		iref_pulse_w = 2;
3054 		break;
3055 	case 38400:
3056 		iref_ndiv = 2;
3057 		iref_trim = 28;
3058 		iref_pulse_w = 1;
3059 		break;
3060 	default:
3061 		MISSING_CASE(refclk_khz);
3062 		return -EINVAL;
3063 	}
3064 
3065 	/*
3066 	 * tdc_res = 0.000003
3067 	 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
3068 	 *
3069 	 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
3070 	 * was supposed to be a division, but we rearranged the operations of
3071 	 * the formula to avoid early divisions so we don't multiply the
3072 	 * rounding errors.
3073 	 *
3074 	 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
3075 	 * we also rearrange to work with integers.
3076 	 *
3077 	 * The 0.5 transformed to 5 results in a multiplication by 10 and the
3078 	 * last division by 10.
3079 	 */
3080 	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
3081 
3082 	/*
3083 	 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
3084 	 * 32 bits. That's not a problem since we round the division down
3085 	 * anyway.
3086 	 */
3087 	feedfwgain = (use_ssc || m2div_rem > 0) ?
3088 		m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
3089 
3090 	if (dco_khz >= 9000000) {
3091 		prop_coeff = 5;
3092 		int_coeff = 10;
3093 	} else {
3094 		prop_coeff = 4;
3095 		int_coeff = 8;
3096 	}
3097 
3098 	if (use_ssc) {
3099 		tmp = mul_u32_u32(dco_khz, 47 * 32);
3100 		do_div(tmp, refclk_khz * m1div * 10000);
3101 		ssc_stepsize = tmp;
3102 
3103 		tmp = mul_u32_u32(dco_khz, 1000);
3104 		ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
3105 	} else {
3106 		ssc_stepsize = 0;
3107 		ssc_steplen = 0;
3108 	}
3109 	ssc_steplog = 4;
3110 
3111 	/* write pll_state calculations */
3112 	if (is_dkl) {
3113 		hw_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
3114 					 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
3115 					 DKL_PLL_DIV0_FBPREDIV(m1div) |
3116 					 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
3117 		if (display->vbt.override_afc_startup) {
3118 			u8 val = display->vbt.override_afc_startup_val;
3119 
3120 			hw_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val);
3121 		}
3122 
3123 		hw_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
3124 					 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
3125 
3126 		hw_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
3127 					DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
3128 					DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
3129 					(use_ssc ? DKL_PLL_SSC_EN : 0);
3130 
3131 		hw_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
3132 					  DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
3133 
3134 		hw_state->mg_pll_tdc_coldst_bias =
3135 				DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
3136 				DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
3137 
3138 	} else {
3139 		hw_state->mg_pll_div0 =
3140 			(m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
3141 			MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
3142 			MG_PLL_DIV0_FBDIV_INT(m2div_int);
3143 
3144 		hw_state->mg_pll_div1 =
3145 			MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
3146 			MG_PLL_DIV1_DITHER_DIV_2 |
3147 			MG_PLL_DIV1_NDIVRATIO(1) |
3148 			MG_PLL_DIV1_FBPREDIV(m1div);
3149 
3150 		hw_state->mg_pll_lf =
3151 			MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
3152 			MG_PLL_LF_AFCCNTSEL_512 |
3153 			MG_PLL_LF_GAINCTRL(1) |
3154 			MG_PLL_LF_INT_COEFF(int_coeff) |
3155 			MG_PLL_LF_PROP_COEFF(prop_coeff);
3156 
3157 		hw_state->mg_pll_frac_lock =
3158 			MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
3159 			MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
3160 			MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
3161 			MG_PLL_FRAC_LOCK_DCODITHEREN |
3162 			MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
3163 		if (use_ssc || m2div_rem > 0)
3164 			hw_state->mg_pll_frac_lock |=
3165 				MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
3166 
3167 		hw_state->mg_pll_ssc =
3168 			(use_ssc ? MG_PLL_SSC_EN : 0) |
3169 			MG_PLL_SSC_TYPE(2) |
3170 			MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
3171 			MG_PLL_SSC_STEPNUM(ssc_steplog) |
3172 			MG_PLL_SSC_FLLEN |
3173 			MG_PLL_SSC_STEPSIZE(ssc_stepsize);
3174 
3175 		hw_state->mg_pll_tdc_coldst_bias =
3176 			MG_PLL_TDC_COLDST_COLDSTART |
3177 			MG_PLL_TDC_COLDST_IREFINT_EN |
3178 			MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
3179 			MG_PLL_TDC_TDCOVCCORR_EN |
3180 			MG_PLL_TDC_TDCSEL(3);
3181 
3182 		hw_state->mg_pll_bias =
3183 			MG_PLL_BIAS_BIAS_GB_SEL(3) |
3184 			MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
3185 			MG_PLL_BIAS_BIAS_BONUS(10) |
3186 			MG_PLL_BIAS_BIASCAL_EN |
3187 			MG_PLL_BIAS_CTRIM(12) |
3188 			MG_PLL_BIAS_VREF_RDAC(4) |
3189 			MG_PLL_BIAS_IREFTRIM(iref_trim);
3190 
3191 		if (refclk_khz == 38400) {
3192 			hw_state->mg_pll_tdc_coldst_bias_mask =
3193 				MG_PLL_TDC_COLDST_COLDSTART;
3194 			hw_state->mg_pll_bias_mask = 0;
3195 		} else {
3196 			hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
3197 			hw_state->mg_pll_bias_mask = -1U;
3198 		}
3199 
3200 		hw_state->mg_pll_tdc_coldst_bias &=
3201 			hw_state->mg_pll_tdc_coldst_bias_mask;
3202 		hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
3203 	}
3204 
3205 	return 0;
3206 }
3207 
3208 static int icl_ddi_mg_pll_get_freq(struct intel_display *display,
3209 				   const struct intel_dpll *pll,
3210 				   const struct intel_dpll_hw_state *dpll_hw_state)
3211 {
3212 	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3213 	u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
3214 	u64 tmp;
3215 
3216 	ref_clock = display->dpll.ref_clks.nssc;
3217 
3218 	if (DISPLAY_VER(display) >= 12) {
3219 		m1 = hw_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
3220 		m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
3221 		m2_int = hw_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
3222 
3223 		if (hw_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
3224 			m2_frac = hw_state->mg_pll_bias &
3225 				  DKL_PLL_BIAS_FBDIV_FRAC_MASK;
3226 			m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
3227 		} else {
3228 			m2_frac = 0;
3229 		}
3230 	} else {
3231 		m1 = hw_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
3232 		m2_int = hw_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
3233 
3234 		if (hw_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
3235 			m2_frac = hw_state->mg_pll_div0 &
3236 				  MG_PLL_DIV0_FBDIV_FRAC_MASK;
3237 			m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
3238 		} else {
3239 			m2_frac = 0;
3240 		}
3241 	}
3242 
3243 	switch (hw_state->mg_clktop2_hsclkctl &
3244 		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
3245 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
3246 		div1 = 2;
3247 		break;
3248 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
3249 		div1 = 3;
3250 		break;
3251 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
3252 		div1 = 5;
3253 		break;
3254 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
3255 		div1 = 7;
3256 		break;
3257 	default:
3258 		MISSING_CASE(hw_state->mg_clktop2_hsclkctl);
3259 		return 0;
3260 	}
3261 
3262 	div2 = (hw_state->mg_clktop2_hsclkctl &
3263 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
3264 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;
3265 
3266 	/* div2 value of 0 is same as 1 means no div */
3267 	if (div2 == 0)
3268 		div2 = 1;
3269 
3270 	/*
3271 	 * Adjust the original formula to delay the division by 2^22 in order to
3272 	 * minimize possible rounding errors.
3273 	 */
3274 	tmp = (u64)m1 * m2_int * ref_clock +
3275 	      (((u64)m1 * m2_frac * ref_clock) >> 22);
3276 	tmp = div_u64(tmp, 5 * div1 * div2);
3277 
3278 	return tmp;
3279 }
3280 
3281 /**
3282  * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
3283  * @crtc_state: state for the CRTC to select the DPLL for
3284  * @port_dpll_id: the active @port_dpll_id to select
3285  *
3286  * Select the given @port_dpll_id instance from the DPLLs reserved for the
3287  * CRTC.
3288  */
3289 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
3290 			      enum icl_port_dpll_id port_dpll_id)
3291 {
3292 	struct icl_port_dpll *port_dpll =
3293 		&crtc_state->icl_port_dplls[port_dpll_id];
3294 
3295 	crtc_state->intel_dpll = port_dpll->pll;
3296 	crtc_state->dpll_hw_state = port_dpll->hw_state;
3297 }
3298 
3299 static void icl_update_active_dpll(struct intel_atomic_state *state,
3300 				   struct intel_crtc *crtc,
3301 				   struct intel_encoder *encoder)
3302 {
3303 	struct intel_crtc_state *crtc_state =
3304 		intel_atomic_get_new_crtc_state(state, crtc);
3305 	struct intel_digital_port *primary_port;
3306 	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3307 
3308 	primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3309 		enc_to_mst(encoder)->primary :
3310 		enc_to_dig_port(encoder);
3311 
3312 	if (primary_port &&
3313 	    (intel_tc_port_in_dp_alt_mode(primary_port) ||
3314 	     intel_tc_port_in_legacy_mode(primary_port)))
3315 		port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3316 
3317 	icl_set_active_port_dpll(crtc_state, port_dpll_id);
3318 }
3319 
3320 static int icl_compute_combo_phy_dpll(struct intel_atomic_state *state,
3321 				      struct intel_crtc *crtc)
3322 {
3323 	struct intel_display *display = to_intel_display(state);
3324 	struct intel_crtc_state *crtc_state =
3325 		intel_atomic_get_new_crtc_state(state, crtc);
3326 	struct icl_port_dpll *port_dpll =
3327 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3328 	struct skl_wrpll_params pll_params = {};
3329 	int ret;
3330 
3331 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
3332 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
3333 		ret = icl_calc_wrpll(crtc_state, &pll_params);
3334 	else
3335 		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
3336 
3337 	if (ret)
3338 		return ret;
3339 
3340 	icl_calc_dpll_state(display, &pll_params, &port_dpll->hw_state);
3341 
3342 	/* this is mainly for the fastset check */
3343 	icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
3344 
3345 	crtc_state->port_clock = icl_ddi_combo_pll_get_freq(display, NULL,
3346 							    &port_dpll->hw_state);
3347 
3348 	return 0;
3349 }
3350 
3351 static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
3352 				  struct intel_crtc *crtc,
3353 				  struct intel_encoder *encoder)
3354 {
3355 	struct intel_display *display = to_intel_display(crtc);
3356 	struct intel_crtc_state *crtc_state =
3357 		intel_atomic_get_new_crtc_state(state, crtc);
3358 	struct icl_port_dpll *port_dpll =
3359 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3360 	enum port port = encoder->port;
3361 	unsigned long dpll_mask;
3362 
3363 	if (display->platform.alderlake_s) {
3364 		dpll_mask =
3365 			BIT(DPLL_ID_DG1_DPLL3) |
3366 			BIT(DPLL_ID_DG1_DPLL2) |
3367 			BIT(DPLL_ID_ICL_DPLL1) |
3368 			BIT(DPLL_ID_ICL_DPLL0);
3369 	} else if (display->platform.dg1) {
3370 		if (port == PORT_D || port == PORT_E) {
3371 			dpll_mask =
3372 				BIT(DPLL_ID_DG1_DPLL2) |
3373 				BIT(DPLL_ID_DG1_DPLL3);
3374 		} else {
3375 			dpll_mask =
3376 				BIT(DPLL_ID_DG1_DPLL0) |
3377 				BIT(DPLL_ID_DG1_DPLL1);
3378 		}
3379 	} else if (display->platform.rocketlake) {
3380 		dpll_mask =
3381 			BIT(DPLL_ID_EHL_DPLL4) |
3382 			BIT(DPLL_ID_ICL_DPLL1) |
3383 			BIT(DPLL_ID_ICL_DPLL0);
3384 	} else if ((display->platform.jasperlake ||
3385 		    display->platform.elkhartlake) &&
3386 		   port != PORT_A) {
3387 		dpll_mask =
3388 			BIT(DPLL_ID_EHL_DPLL4) |
3389 			BIT(DPLL_ID_ICL_DPLL1) |
3390 			BIT(DPLL_ID_ICL_DPLL0);
3391 	} else {
3392 		dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
3393 	}
3394 
3395 	/* Eliminate DPLLs from consideration if reserved by HTI */
3396 	dpll_mask &= ~intel_hti_dpll_mask(display);
3397 
3398 	port_dpll->pll = intel_find_dpll(state, crtc,
3399 					 &port_dpll->hw_state,
3400 					 dpll_mask);
3401 	if (!port_dpll->pll)
3402 		return -EINVAL;
3403 
3404 	intel_reference_dpll(state, crtc,
3405 			     port_dpll->pll, &port_dpll->hw_state);
3406 
3407 	icl_update_active_dpll(state, crtc, encoder);
3408 
3409 	return 0;
3410 }
3411 
3412 static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
3413 				    struct intel_crtc *crtc)
3414 {
3415 	struct intel_display *display = to_intel_display(state);
3416 	struct intel_crtc_state *crtc_state =
3417 		intel_atomic_get_new_crtc_state(state, crtc);
3418 	const struct intel_crtc_state *old_crtc_state =
3419 		intel_atomic_get_old_crtc_state(state, crtc);
3420 	struct icl_port_dpll *port_dpll =
3421 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3422 	struct skl_wrpll_params pll_params = {};
3423 	int ret;
3424 
3425 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3426 	ret = icl_calc_tbt_pll(crtc_state, &pll_params);
3427 	if (ret)
3428 		return ret;
3429 
3430 	icl_calc_dpll_state(display, &pll_params, &port_dpll->hw_state);
3431 
3432 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3433 	ret = icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state);
3434 	if (ret)
3435 		return ret;
3436 
3437 	/* this is mainly for the fastset check */
3438 	if (old_crtc_state->intel_dpll &&
3439 	    old_crtc_state->intel_dpll->info->id == DPLL_ID_ICL_TBTPLL)
3440 		icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
3441 	else
3442 		icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);
3443 
3444 	crtc_state->port_clock = icl_ddi_mg_pll_get_freq(display, NULL,
3445 							 &port_dpll->hw_state);
3446 
3447 	return 0;
3448 }
3449 
3450 static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
3451 				struct intel_crtc *crtc,
3452 				struct intel_encoder *encoder)
3453 {
3454 	struct intel_crtc_state *crtc_state =
3455 		intel_atomic_get_new_crtc_state(state, crtc);
3456 	struct icl_port_dpll *port_dpll =
3457 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3458 	enum intel_dpll_id dpll_id;
3459 	int ret;
3460 
3461 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3462 	port_dpll->pll = intel_find_dpll(state, crtc,
3463 					 &port_dpll->hw_state,
3464 					 BIT(DPLL_ID_ICL_TBTPLL));
3465 	if (!port_dpll->pll)
3466 		return -EINVAL;
3467 	intel_reference_dpll(state, crtc,
3468 			     port_dpll->pll, &port_dpll->hw_state);
3469 
3470 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3471 	dpll_id = icl_tc_port_to_pll_id(intel_encoder_to_tc(encoder));
3472 	port_dpll->pll = intel_find_dpll(state, crtc,
3473 					 &port_dpll->hw_state,
3474 					 BIT(dpll_id));
3475 	if (!port_dpll->pll) {
3476 		ret = -EINVAL;
3477 		goto err_unreference_tbt_pll;
3478 	}
3479 	intel_reference_dpll(state, crtc,
3480 			     port_dpll->pll, &port_dpll->hw_state);
3481 
3482 	icl_update_active_dpll(state, crtc, encoder);
3483 
3484 	return 0;
3485 
3486 err_unreference_tbt_pll:
3487 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3488 	intel_unreference_dpll(state, crtc, port_dpll->pll);
3489 
3490 	return ret;
3491 }
3492 
3493 static int icl_compute_dplls(struct intel_atomic_state *state,
3494 			     struct intel_crtc *crtc,
3495 			     struct intel_encoder *encoder)
3496 {
3497 	if (intel_encoder_is_combo(encoder))
3498 		return icl_compute_combo_phy_dpll(state, crtc);
3499 	else if (intel_encoder_is_tc(encoder))
3500 		return icl_compute_tc_phy_dplls(state, crtc);
3501 
3502 	MISSING_CASE(encoder->port);
3503 
3504 	return 0;
3505 }
3506 
3507 static int icl_get_dplls(struct intel_atomic_state *state,
3508 			 struct intel_crtc *crtc,
3509 			 struct intel_encoder *encoder)
3510 {
3511 	if (intel_encoder_is_combo(encoder))
3512 		return icl_get_combo_phy_dpll(state, crtc, encoder);
3513 	else if (intel_encoder_is_tc(encoder))
3514 		return icl_get_tc_phy_dplls(state, crtc, encoder);
3515 
3516 	MISSING_CASE(encoder->port);
3517 
3518 	return -EINVAL;
3519 }
3520 
3521 static void icl_put_dplls(struct intel_atomic_state *state,
3522 			  struct intel_crtc *crtc)
3523 {
3524 	const struct intel_crtc_state *old_crtc_state =
3525 		intel_atomic_get_old_crtc_state(state, crtc);
3526 	struct intel_crtc_state *new_crtc_state =
3527 		intel_atomic_get_new_crtc_state(state, crtc);
3528 	enum icl_port_dpll_id id;
3529 
3530 	new_crtc_state->intel_dpll = NULL;
3531 
3532 	for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3533 		const struct icl_port_dpll *old_port_dpll =
3534 			&old_crtc_state->icl_port_dplls[id];
3535 		struct icl_port_dpll *new_port_dpll =
3536 			&new_crtc_state->icl_port_dplls[id];
3537 
3538 		new_port_dpll->pll = NULL;
3539 
3540 		if (!old_port_dpll->pll)
3541 			continue;
3542 
3543 		intel_unreference_dpll(state, crtc, old_port_dpll->pll);
3544 	}
3545 }
3546 
/*
 * Read out the MG PHY PLL register state into @dpll_hw_state->icl, masking
 * out the bits not owned by SW (matching the masks used when programming,
 * see icl_mg_pll_write()). Returns true if the PLL is enabled.
 */
static bool mg_pll_get_hw_state(struct intel_display *display,
				struct intel_dpll *pll,
				struct intel_dpll_hw_state *dpll_hw_state)
{
	struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	i915_reg_t enable_reg = intel_tc_pll_enable_reg(display, pll);

	/* Registers are only accessible with the display powered up. */
	wakeref = intel_display_power_get_if_enabled(display,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(display, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	hw_state->mg_refclkin_ctl = intel_de_read(display,
						  MG_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_de_read(display, MG_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_de_read(display, MG_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_pll_div0 = intel_de_read(display, MG_PLL_DIV0(tc_port));
	hw_state->mg_pll_div1 = intel_de_read(display, MG_PLL_DIV1(tc_port));
	hw_state->mg_pll_lf = intel_de_read(display, MG_PLL_LF(tc_port));
	hw_state->mg_pll_frac_lock = intel_de_read(display,
						   MG_PLL_FRAC_LOCK(tc_port));
	hw_state->mg_pll_ssc = intel_de_read(display, MG_PLL_SSC(tc_port));

	hw_state->mg_pll_bias = intel_de_read(display, MG_PLL_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias =
		intel_de_read(display, MG_PLL_TDC_COLDST_BIAS(tc_port));

	/* Same refclk-dependent masks as in icl_calc_mg_pll_state() */
	if (display->dpll.ref_clks.nssc == 38400) {
		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
		hw_state->mg_pll_bias_mask = 0;
	} else {
		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
		hw_state->mg_pll_bias_mask = -1U;
	}

	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;

	ret = true;
out:
	intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3613 
/*
 * Read out the Dekel PHY PLL register state into @dpll_hw_state->icl,
 * masking out the bits not owned by SW (matching the masks used when
 * programming, see dkl_pll_write()). Returns true if the PLL is enabled.
 */
static bool dkl_pll_get_hw_state(struct intel_display *display,
				 struct intel_dpll *pll,
				 struct intel_dpll_hw_state *dpll_hw_state)
{
	struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	/* Registers are only accessible with the display powered up. */
	wakeref = intel_display_power_get_if_enabled(display,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(display, intel_tc_pll_enable_reg(display, pll));
	if (!(val & PLL_ENABLE))
		goto out;

	/*
	 * All registers read here have the same HIP_INDEX_REG even though
	 * they are on different building blocks
	 */
	hw_state->mg_refclkin_ctl = intel_dkl_phy_read(display,
						       DKL_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_dkl_phy_read(display, DKL_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_dkl_phy_read(display, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_pll_div0 = intel_dkl_phy_read(display, DKL_PLL_DIV0(tc_port));
	/* The AFC startup field is only SW-owned when overridden by the VBT */
	val = DKL_PLL_DIV0_MASK;
	if (display->vbt.override_afc_startup)
		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
	hw_state->mg_pll_div0 &= val;

	hw_state->mg_pll_div1 = intel_dkl_phy_read(display, DKL_PLL_DIV1(tc_port));
	hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
				  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);

	hw_state->mg_pll_ssc = intel_dkl_phy_read(display, DKL_PLL_SSC(tc_port));
	hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
				 DKL_PLL_SSC_STEP_LEN_MASK |
				 DKL_PLL_SSC_STEP_NUM_MASK |
				 DKL_PLL_SSC_EN);

	hw_state->mg_pll_bias = intel_dkl_phy_read(display, DKL_PLL_BIAS(tc_port));
	hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK);

	hw_state->mg_pll_tdc_coldst_bias =
		intel_dkl_phy_read(display, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
					     DKL_PLL_TDC_FEED_FWD_GAIN_MASK);

	ret = true;
out:
	intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3685 
/*
 * Read out a combo-PHY/TBT style PLL's CFGCR register state into
 * @dpll_hw_state->icl, using the platform-specific register locations.
 * Returns true if the PLL is enabled (PLL_ENABLE set in @enable_reg).
 */
static bool icl_pll_get_hw_state(struct intel_display *display,
				 struct intel_dpll *pll,
				 struct intel_dpll_hw_state *dpll_hw_state,
				 i915_reg_t enable_reg)
{
	struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	/* Registers are only accessible with the display powered up. */
	wakeref = intel_display_power_get_if_enabled(display,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(display, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	/* CFGCR0/CFGCR1 live at different offsets depending on the platform */
	if (display->platform.alderlake_s) {
		hw_state->cfgcr0 = intel_de_read(display, ADLS_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(display, ADLS_DPLL_CFGCR1(id));
	} else if (display->platform.dg1) {
		hw_state->cfgcr0 = intel_de_read(display, DG1_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(display, DG1_DPLL_CFGCR1(id));
	} else if (display->platform.rocketlake) {
		hw_state->cfgcr0 = intel_de_read(display,
						 RKL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(display,
						 RKL_DPLL_CFGCR1(id));
	} else if (DISPLAY_VER(display) >= 12) {
		hw_state->cfgcr0 = intel_de_read(display,
						 TGL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(display,
						 TGL_DPLL_CFGCR1(id));
		/* DIV0 is only read out when the VBT overrides AFC startup */
		if (display->vbt.override_afc_startup) {
			hw_state->div0 = intel_de_read(display, TGL_DPLL0_DIV0(id));
			hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK;
		}
	} else {
		/* JSL/EHL DPLL4 uses the register instance at index 4 */
		if ((display->platform.jasperlake || display->platform.elkhartlake) &&
		    id == DPLL_ID_EHL_DPLL4) {
			hw_state->cfgcr0 = intel_de_read(display,
							 ICL_DPLL_CFGCR0(4));
			hw_state->cfgcr1 = intel_de_read(display,
							 ICL_DPLL_CFGCR1(4));
		} else {
			hw_state->cfgcr0 = intel_de_read(display,
							 ICL_DPLL_CFGCR0(id));
			hw_state->cfgcr1 = intel_de_read(display,
							 ICL_DPLL_CFGCR1(id));
		}
	}

	ret = true;
out:
	intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3746 
3747 static bool combo_pll_get_hw_state(struct intel_display *display,
3748 				   struct intel_dpll *pll,
3749 				   struct intel_dpll_hw_state *dpll_hw_state)
3750 {
3751 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(display, pll);
3752 
3753 	return icl_pll_get_hw_state(display, pll, dpll_hw_state, enable_reg);
3754 }
3755 
3756 static bool tbt_pll_get_hw_state(struct intel_display *display,
3757 				 struct intel_dpll *pll,
3758 				 struct intel_dpll_hw_state *dpll_hw_state)
3759 {
3760 	return icl_pll_get_hw_state(display, pll, dpll_hw_state, TBT_PLL_ENABLE);
3761 }
3762 
/*
 * Program a combo-PHY/TBT style PLL's CFGCR registers from @hw_state, using
 * the platform-specific register locations (mirroring the selection in
 * icl_pll_get_hw_state()). On platforms with a DIV0 register, the AFC
 * startup field is only written when the VBT requests an override.
 */
static void icl_dpll_write(struct intel_display *display,
			   struct intel_dpll *pll,
			   const struct icl_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	i915_reg_t cfgcr0_reg, cfgcr1_reg, div0_reg = INVALID_MMIO_REG;

	if (display->platform.alderlake_s) {
		cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
		cfgcr1_reg = ADLS_DPLL_CFGCR1(id);
	} else if (display->platform.dg1) {
		cfgcr0_reg = DG1_DPLL_CFGCR0(id);
		cfgcr1_reg = DG1_DPLL_CFGCR1(id);
	} else if (display->platform.rocketlake) {
		cfgcr0_reg = RKL_DPLL_CFGCR0(id);
		cfgcr1_reg = RKL_DPLL_CFGCR1(id);
	} else if (DISPLAY_VER(display) >= 12) {
		cfgcr0_reg = TGL_DPLL_CFGCR0(id);
		cfgcr1_reg = TGL_DPLL_CFGCR1(id);
		div0_reg = TGL_DPLL0_DIV0(id);
	} else {
		/* JSL/EHL DPLL4 uses the register instance at index 4 */
		if ((display->platform.jasperlake || display->platform.elkhartlake) &&
		    id == DPLL_ID_EHL_DPLL4) {
			cfgcr0_reg = ICL_DPLL_CFGCR0(4);
			cfgcr1_reg = ICL_DPLL_CFGCR1(4);
		} else {
			cfgcr0_reg = ICL_DPLL_CFGCR0(id);
			cfgcr1_reg = ICL_DPLL_CFGCR1(id);
		}
	}

	intel_de_write(display, cfgcr0_reg, hw_state->cfgcr0);
	intel_de_write(display, cfgcr1_reg, hw_state->cfgcr1);
	/* An AFC override without a DIV0 register would be silently lost */
	drm_WARN_ON_ONCE(display->drm, display->vbt.override_afc_startup &&
			 !i915_mmio_reg_valid(div0_reg));
	if (display->vbt.override_afc_startup &&
	    i915_mmio_reg_valid(div0_reg))
		intel_de_rmw(display, div0_reg,
			     TGL_DPLL0_DIV0_AFC_STARTUP_MASK, hw_state->div0);
	intel_de_posting_read(display, cfgcr1_reg);
}
3804 
/* Program the MG PHY PLL registers for the PLL's TC port from @hw_state. */
static void icl_mg_pll_write(struct intel_display *display,
			     struct intel_dpll *pll,
			     const struct icl_dpll_hw_state *hw_state)
{
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);

	/*
	 * Some of the following registers have reserved fields, so program
	 * these with RMW based on a mask. The mask can be fixed or generated
	 * during the calc/readout phase if the mask depends on some other HW
	 * state like refclk, see icl_calc_mg_pll_state().
	 */
	intel_de_rmw(display, MG_REFCLKIN_CTL(tc_port),
		     MG_REFCLKIN_CTL_OD_2_MUX_MASK, hw_state->mg_refclkin_ctl);

	intel_de_rmw(display, MG_CLKTOP2_CORECLKCTL1(tc_port),
		     MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK,
		     hw_state->mg_clktop2_coreclkctl1);

	intel_de_rmw(display, MG_CLKTOP2_HSCLKCTL(tc_port),
		     MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		     MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		     MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		     MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK,
		     hw_state->mg_clktop2_hsclkctl);

	intel_de_write(display, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
	intel_de_write(display, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
	intel_de_write(display, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
	intel_de_write(display, MG_PLL_FRAC_LOCK(tc_port),
		       hw_state->mg_pll_frac_lock);
	intel_de_write(display, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);

	intel_de_rmw(display, MG_PLL_BIAS(tc_port),
		     hw_state->mg_pll_bias_mask, hw_state->mg_pll_bias);

	intel_de_rmw(display, MG_PLL_TDC_COLDST_BIAS(tc_port),
		     hw_state->mg_pll_tdc_coldst_bias_mask,
		     hw_state->mg_pll_tdc_coldst_bias);

	/* Flush the writes before the PLL is enabled */
	intel_de_posting_read(display, MG_PLL_TDC_COLDST_BIAS(tc_port));
}
3847 
/*
 * Program the Dekel PHY PLL registers for the PLL's TC port from @hw_state.
 * Every register is updated via read-modify-write so that fields not owned
 * by SW keep their current values (the same masks are applied at readout,
 * see dkl_pll_get_hw_state()).
 */
static void dkl_pll_write(struct intel_display *display,
			  struct intel_dpll *pll,
			  const struct icl_dpll_hw_state *hw_state)
{
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
	u32 val;

	/*
	 * All registers programmed here have the same HIP_INDEX_REG even
	 * though on different building block
	 */
	/* All the registers are RMW */
	val = intel_dkl_phy_read(display, DKL_REFCLKIN_CTL(tc_port));
	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
	val |= hw_state->mg_refclkin_ctl;
	intel_dkl_phy_write(display, DKL_REFCLKIN_CTL(tc_port), val);

	val = intel_dkl_phy_read(display, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
	val |= hw_state->mg_clktop2_coreclkctl1;
	intel_dkl_phy_write(display, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);

	val = intel_dkl_phy_read(display, DKL_CLKTOP2_HSCLKCTL(tc_port));
	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
	val |= hw_state->mg_clktop2_hsclkctl;
	intel_dkl_phy_write(display, DKL_CLKTOP2_HSCLKCTL(tc_port), val);

	/* The AFC startup field is only written when the VBT overrides it */
	val = DKL_PLL_DIV0_MASK;
	if (display->vbt.override_afc_startup)
		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
	intel_dkl_phy_rmw(display, DKL_PLL_DIV0(tc_port), val,
			  hw_state->mg_pll_div0);

	val = intel_dkl_phy_read(display, DKL_PLL_DIV1(tc_port));
	val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
		 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
	val |= hw_state->mg_pll_div1;
	intel_dkl_phy_write(display, DKL_PLL_DIV1(tc_port), val);

	val = intel_dkl_phy_read(display, DKL_PLL_SSC(tc_port));
	val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
		 DKL_PLL_SSC_STEP_LEN_MASK |
		 DKL_PLL_SSC_STEP_NUM_MASK |
		 DKL_PLL_SSC_EN);
	val |= hw_state->mg_pll_ssc;
	intel_dkl_phy_write(display, DKL_PLL_SSC(tc_port), val);

	val = intel_dkl_phy_read(display, DKL_PLL_BIAS(tc_port));
	val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
		 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
	val |= hw_state->mg_pll_bias;
	intel_dkl_phy_write(display, DKL_PLL_BIAS(tc_port), val);

	val = intel_dkl_phy_read(display, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
		 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
	val |= hw_state->mg_pll_tdc_coldst_bias;
	intel_dkl_phy_write(display, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);

	/* Flush the writes before the PLL is enabled */
	intel_dkl_phy_posting_read(display, DKL_PLL_TDC_COLDST_BIAS(tc_port));
}
3912 
3913 static void icl_pll_power_enable(struct intel_display *display,
3914 				 struct intel_dpll *pll,
3915 				 i915_reg_t enable_reg)
3916 {
3917 	intel_de_rmw(display, enable_reg, 0, PLL_POWER_ENABLE);
3918 
3919 	/*
3920 	 * The spec says we need to "wait" but it also says it should be
3921 	 * immediate.
3922 	 */
3923 	if (intel_de_wait_for_set_ms(display, enable_reg, PLL_POWER_STATE, 1))
3924 		drm_err(display->drm, "PLL %d Power not enabled\n",
3925 			pll->info->id);
3926 }
3927 
3928 static void icl_pll_enable(struct intel_display *display,
3929 			   struct intel_dpll *pll,
3930 			   i915_reg_t enable_reg)
3931 {
3932 	intel_de_rmw(display, enable_reg, 0, PLL_ENABLE);
3933 
3934 	/* Timeout is actually 600us. */
3935 	if (intel_de_wait_for_set_ms(display, enable_reg, PLL_LOCK, 1))
3936 		drm_err(display->drm, "PLL %d not locked\n", pll->info->id);
3937 }
3938 
/*
 * Wa_16011069516: disable CMTG DPT clock gating on ADL-P display stepping
 * A0. Must be called while DPLL0 is enabled, since TRANS_CMTG_CHICKEN is
 * only accessible then.
 */
static void adlp_cmtg_clock_gating_wa(struct intel_display *display, struct intel_dpll *pll)
{
	u32 val;

	/* Only ADL-P display step A0, and only for DPLL0. */
	if (!(display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) ||
	    pll->info->id != DPLL_ID_ICL_DPLL0)
		return;
	/*
	 * Wa_16011069516:adl-p[a0]
	 *
	 * All CMTG regs are unreliable until CMTG clock gating is disabled,
	 * so we can only assume the default TRANS_CMTG_CHICKEN reg value and
	 * sanity check this assumption with a double read, which presumably
	 * returns the correct value even with clock gating on.
	 *
	 * Instead of the usual place for workarounds we apply this one here,
	 * since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled.
	 */
	/* First read of the double read; its result is intentionally discarded. */
	val = intel_de_read(display, TRANS_CMTG_CHICKEN);
	/* intel_de_rmw() returns the pre-RMW value, i.e. the second read. */
	val = intel_de_rmw(display, TRANS_CMTG_CHICKEN, ~0, DISABLE_DPT_CLK_GATING);
	if (drm_WARN_ON(display->drm, val & ~DISABLE_DPT_CLK_GATING))
		drm_dbg_kms(display->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val);
}
3962 
/* Power up, program and enable a combo PHY PLL. */
static void combo_pll_enable(struct intel_display *display,
			     struct intel_dpll *pll,
			     const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
	i915_reg_t enable_reg = intel_combo_pll_enable_reg(display, pll);

	/* Power up the PLL before it is programmed or enabled. */
	icl_pll_power_enable(display, pll, enable_reg);

	/* Program the PLL configuration while powered but not yet enabled. */
	icl_dpll_write(display, pll, hw_state);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(display, pll, enable_reg);

	/* Wa_16011069516: must run while DPLL0 is enabled. */
	adlp_cmtg_clock_gating_wa(display, pll);

	/* DVFS post sequence would be here. See the comment above. */
}
3986 
/* Power up, program and enable the TBT PLL (single fixed enable register). */
static void tbt_pll_enable(struct intel_display *display,
			   struct intel_dpll *pll,
			   const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;

	icl_pll_power_enable(display, pll, TBT_PLL_ENABLE);

	/* Program the PLL configuration while powered but not yet enabled. */
	icl_dpll_write(display, pll, hw_state);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(display, pll, TBT_PLL_ENABLE);

	/* DVFS post sequence would be here. See the comment above. */
}
4007 
/* Power up, program and enable a Type-C (MG/DKL) PHY PLL. */
static void mg_pll_enable(struct intel_display *display,
			  struct intel_dpll *pll,
			  const struct intel_dpll_hw_state *dpll_hw_state)
{
	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
	i915_reg_t enable_reg = intel_tc_pll_enable_reg(display, pll);

	icl_pll_power_enable(display, pll, enable_reg);

	/* Display version 12+ programs DKL PHY registers, ICL the MG PHY ones. */
	if (DISPLAY_VER(display) >= 12)
		dkl_pll_write(display, pll, hw_state);
	else
		icl_mg_pll_write(display, pll, hw_state);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(display, pll, enable_reg);

	/* DVFS post sequence would be here. See the comment above. */
}
4032 
/*
 * Common ICL+ PLL disable sequence: clear the enable bit, wait for the PLL
 * to unlock, then power it down and wait for the power state to clear.
 */
static void icl_pll_disable(struct intel_display *display,
			    struct intel_dpll *pll,
			    i915_reg_t enable_reg)
{
	/* The first steps are done by intel_ddi_post_disable(). */

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	intel_de_rmw(display, enable_reg, PLL_ENABLE, 0);

	/* Timeout is actually 1us. */
	if (intel_de_wait_for_clear_ms(display, enable_reg, PLL_LOCK, 1))
		drm_err(display->drm, "PLL %d locked\n", pll->info->id)
;

	/* DVFS post sequence would be here. See the comment above. */

	intel_de_rmw(display, enable_reg, PLL_POWER_ENABLE, 0);

	/*
	 * The spec says we need to "wait" but it also says it should be
	 * immediate.
	 */
	if (intel_de_wait_for_clear_ms(display, enable_reg, PLL_POWER_STATE, 1))
		drm_err(display->drm, "PLL %d Power not disabled\n",
			pll->info->id);
}
4063 
static void combo_pll_disable(struct intel_display *display,
			      struct intel_dpll *pll)
{
	/* Combo PHY PLLs have a per-PLL enable register. */
	icl_pll_disable(display, pll,
			intel_combo_pll_enable_reg(display, pll));
}
4071 
4072 static void tbt_pll_disable(struct intel_display *display,
4073 			    struct intel_dpll *pll)
4074 {
4075 	icl_pll_disable(display, pll, TBT_PLL_ENABLE);
4076 }
4077 
static void mg_pll_disable(struct intel_display *display,
			   struct intel_dpll *pll)
{
	/* Type-C PLLs have a per-PLL enable register. */
	icl_pll_disable(display, pll,
			intel_tc_pll_enable_reg(display, pll));
}
4085 
/* ICL+ DPLLs use the cdclk reference clock as their non-SSC reference. */
static void icl_update_dpll_ref_clks(struct intel_display *display)
{
	/* No SSC ref */
	display->dpll.ref_clks.nssc = display->cdclk.hw.ref;
}
4091 
4092 static void icl_dump_hw_state(struct drm_printer *p,
4093 			      const struct intel_dpll_hw_state *dpll_hw_state)
4094 {
4095 	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
4096 
4097 	drm_printf(p, "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, "
4098 		   "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
4099 		   "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
4100 		   "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
4101 		   "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
4102 		   "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
4103 		   hw_state->cfgcr0, hw_state->cfgcr1, hw_state->div0,
4104 		   hw_state->mg_refclkin_ctl,
4105 		   hw_state->mg_clktop2_coreclkctl1,
4106 		   hw_state->mg_clktop2_hsclkctl,
4107 		   hw_state->mg_pll_div0,
4108 		   hw_state->mg_pll_div1,
4109 		   hw_state->mg_pll_lf,
4110 		   hw_state->mg_pll_frac_lock,
4111 		   hw_state->mg_pll_ssc,
4112 		   hw_state->mg_pll_bias,
4113 		   hw_state->mg_pll_tdc_coldst_bias);
4114 }
4115 
4116 static bool icl_compare_hw_state(const struct intel_dpll_hw_state *_a,
4117 				 const struct intel_dpll_hw_state *_b)
4118 {
4119 	const struct icl_dpll_hw_state *a = &_a->icl;
4120 	const struct icl_dpll_hw_state *b = &_b->icl;
4121 
4122 	/* FIXME split combo vs. mg more thoroughly */
4123 	return a->cfgcr0 == b->cfgcr0 &&
4124 		a->cfgcr1 == b->cfgcr1 &&
4125 		a->div0 == b->div0 &&
4126 		a->mg_refclkin_ctl == b->mg_refclkin_ctl &&
4127 		a->mg_clktop2_coreclkctl1 == b->mg_clktop2_coreclkctl1 &&
4128 		a->mg_clktop2_hsclkctl == b->mg_clktop2_hsclkctl &&
4129 		a->mg_pll_div0 == b->mg_pll_div0 &&
4130 		a->mg_pll_div1 == b->mg_pll_div1 &&
4131 		a->mg_pll_lf == b->mg_pll_lf &&
4132 		a->mg_pll_frac_lock == b->mg_pll_frac_lock &&
4133 		a->mg_pll_ssc == b->mg_pll_ssc &&
4134 		a->mg_pll_bias == b->mg_pll_bias &&
4135 		a->mg_pll_tdc_coldst_bias == b->mg_pll_tdc_coldst_bias;
4136 }
4137 
/* Combo PHY PLL hooks (ICL and later platforms with combo ports). */
static const struct intel_dpll_funcs combo_pll_funcs = {
	.enable = combo_pll_enable,
	.disable = combo_pll_disable,
	.get_hw_state = combo_pll_get_hw_state,
	.get_freq = icl_ddi_combo_pll_get_freq,
};

/* Thunderbolt PLL hooks (fixed TBT_PLL_ENABLE register). */
static const struct intel_dpll_funcs tbt_pll_funcs = {
	.enable = tbt_pll_enable,
	.disable = tbt_pll_disable,
	.get_hw_state = tbt_pll_get_hw_state,
	.get_freq = icl_ddi_tbt_pll_get_freq,
};

/* ICL MG PHY (Type-C) PLL hooks. */
static const struct intel_dpll_funcs mg_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = mg_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};
4158 
/* Ice Lake: 2 combo DPLLs, the TBT PLL and 4 MG (Type-C) PLLs. */
static const struct dpll_info icl_plls[] = {
	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
	{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
	  .is_alt_port_dpll = true, },
	{ .name = "MG PLL 1", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
	{ .name = "MG PLL 2", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
	{ .name = "MG PLL 3", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
	{ .name = "MG PLL 4", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
	{}
};

static const struct intel_dpll_mgr icl_pll_mgr = {
	.dpll_info = icl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
	.compare_hw_state = icl_compare_hw_state,
};
4181 
/* Elkhart Lake / Jasper Lake: combo DPLLs only; DPLL 4 requires DC_OFF. */
static const struct dpll_info ehl_plls[] = {
	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
	{ .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4,
	  .power_domain = POWER_DOMAIN_DC_OFF, },
	{}
};

/* No .update_active_dpll: the table above has no Type-C/TBT PLLs. */
static const struct intel_dpll_mgr ehl_pll_mgr = {
	.dpll_info = ehl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
	.compare_hw_state = icl_compare_hw_state,
};
4199 
/* Type-C PLL hooks for display version 12+ (DKL PHY register programming). */
static const struct intel_dpll_funcs dkl_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = dkl_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};

/* Tiger Lake: 2 combo DPLLs, the TBT PLL and 6 Type-C (DKL) PLLs. */
static const struct dpll_info tgl_plls[] = {
	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
	{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
	  .is_alt_port_dpll = true, },
	{ .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
	{ .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
	{ .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
	{ .name = "TC PLL 4", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
	{ .name = "TC PLL 5", .funcs = &dkl_pll_funcs, .id = DPLL_ID_TGL_MGPLL5, },
	{ .name = "TC PLL 6", .funcs = &dkl_pll_funcs, .id = DPLL_ID_TGL_MGPLL6, },
	{}
};

static const struct intel_dpll_mgr tgl_pll_mgr = {
	.dpll_info = tgl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
	.compare_hw_state = icl_compare_hw_state,
};
4231 
/* Rocket Lake: combo DPLLs only. */
static const struct dpll_info rkl_plls[] = {
	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
	{ .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4, },
	{}
};

/* No .update_active_dpll: the table above has no Type-C/TBT PLLs. */
static const struct intel_dpll_mgr rkl_pll_mgr = {
	.dpll_info = rkl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
	.compare_hw_state = icl_compare_hw_state,
};
4248 
/* DG1: four combo DPLLs. */
static const struct dpll_info dg1_plls[] = {
	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL0, },
	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL1, },
	{ .name = "DPLL 2", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL2, },
	{ .name = "DPLL 3", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL3, },
	{}
};

static const struct intel_dpll_mgr dg1_pll_mgr = {
	.dpll_info = dg1_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
	.compare_hw_state = icl_compare_hw_state,
};
4266 
/* Alder Lake-S: four combo DPLLs (reusing ICL and DG1 PLL IDs). */
static const struct dpll_info adls_plls[] = {
	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
	{ .name = "DPLL 2", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL2, },
	{ .name = "DPLL 3", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL3, },
	{}
};

static const struct intel_dpll_mgr adls_pll_mgr = {
	.dpll_info = adls_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
	.compare_hw_state = icl_compare_hw_state,
};
4284 
/* Alder Lake-P: 2 combo DPLLs, the TBT PLL and 4 Type-C (DKL) PLLs. */
static const struct dpll_info adlp_plls[] = {
	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
	{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
	  .is_alt_port_dpll = true, },
	{ .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
	{ .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
	{ .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
	{ .name = "TC PLL 4", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
	{}
};

static const struct intel_dpll_mgr adlp_pll_mgr = {
	.dpll_info = adlp_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
	.compare_hw_state = icl_compare_hw_state,
};
4307 
/**
 * intel_dpll_init - Initialize DPLLs
 * @display: intel_display device
 *
 * Initialize DPLLs for @display: select the platform's DPLL manager and
 * populate display->dpll.dplls[] from the manager's dpll_info table.
 */
void intel_dpll_init(struct intel_display *display)
{
	const struct intel_dpll_mgr *dpll_mgr = NULL;
	const struct dpll_info *dpll_info;
	int i;

	mutex_init(&display->dpll.lock);

	/*
	 * Ordering matters below: specific platforms must be matched before
	 * the generic DISPLAY_VER() checks that would otherwise swallow
	 * them (e.g. ADL-P/ADL-S/DG1/RKL before the version 12 check).
	 */
	if (DISPLAY_VER(display) >= 14 || display->platform.dg2)
		/* No shared DPLLs on DG2; port PLLs are part of the PHY */
		dpll_mgr = NULL;
	else if (display->platform.alderlake_p)
		dpll_mgr = &adlp_pll_mgr;
	else if (display->platform.alderlake_s)
		dpll_mgr = &adls_pll_mgr;
	else if (display->platform.dg1)
		dpll_mgr = &dg1_pll_mgr;
	else if (display->platform.rocketlake)
		dpll_mgr = &rkl_pll_mgr;
	else if (DISPLAY_VER(display) >= 12)
		dpll_mgr = &tgl_pll_mgr;
	else if (display->platform.jasperlake || display->platform.elkhartlake)
		dpll_mgr = &ehl_pll_mgr;
	else if (DISPLAY_VER(display) >= 11)
		dpll_mgr = &icl_pll_mgr;
	else if (display->platform.geminilake || display->platform.broxton)
		dpll_mgr = &bxt_pll_mgr;
	else if (DISPLAY_VER(display) == 9)
		dpll_mgr = &skl_pll_mgr;
	else if (HAS_DDI(display))
		dpll_mgr = &hsw_pll_mgr;
	else if (HAS_PCH_IBX(display) || HAS_PCH_CPT(display))
		dpll_mgr = &pch_pll_mgr;

	if (!dpll_mgr)
		return;

	dpll_info = dpll_mgr->dpll_info;

	/* The info table is terminated by an entry with a NULL name. */
	for (i = 0; dpll_info[i].name; i++) {
		if (drm_WARN_ON(display->drm,
				i >= ARRAY_SIZE(display->dpll.dplls)))
			break;

		/* must fit into unsigned long bitmask on 32bit */
		if (drm_WARN_ON(display->drm, dpll_info[i].id >= 32))
			break;

		display->dpll.dplls[i].info = &dpll_info[i];
		display->dpll.dplls[i].index = i;
	}

	display->dpll.mgr = dpll_mgr;
	display->dpll.num_dpll = i;
}
4369 
4370 /**
4371  * intel_dpll_compute - compute DPLL state CRTC and encoder combination
4372  * @state: atomic state
4373  * @crtc: CRTC to compute DPLLs for
4374  * @encoder: encoder
4375  *
4376  * This function computes the DPLL state for the given CRTC and encoder.
4377  *
4378  * The new configuration in the atomic commit @state is made effective by
4379  * calling intel_dpll_swap_state().
4380  *
4381  * Returns:
4382  * 0 on success, negative error code on failure.
4383  */
4384 int intel_dpll_compute(struct intel_atomic_state *state,
4385 		       struct intel_crtc *crtc,
4386 		       struct intel_encoder *encoder)
4387 {
4388 	struct intel_display *display = to_intel_display(state);
4389 	const struct intel_dpll_mgr *dpll_mgr = display->dpll.mgr;
4390 
4391 	if (drm_WARN_ON(display->drm, !dpll_mgr))
4392 		return -EINVAL;
4393 
4394 	return dpll_mgr->compute_dplls(state, crtc, encoder);
4395 }
4396 
4397 /**
4398  * intel_dpll_reserve - reserve DPLLs for CRTC and encoder combination
4399  * @state: atomic state
4400  * @crtc: CRTC to reserve DPLLs for
4401  * @encoder: encoder
4402  *
4403  * This function reserves all required DPLLs for the given CRTC and encoder
4404  * combination in the current atomic commit @state and the new @crtc atomic
4405  * state.
4406  *
4407  * The new configuration in the atomic commit @state is made effective by
4408  * calling intel_dpll_swap_state().
4409  *
4410  * The reserved DPLLs should be released by calling
4411  * intel_dpll_release().
4412  *
4413  * Returns:
4414  * 0 if all required DPLLs were successfully reserved,
4415  * negative error code otherwise.
4416  */
4417 int intel_dpll_reserve(struct intel_atomic_state *state,
4418 		       struct intel_crtc *crtc,
4419 		       struct intel_encoder *encoder)
4420 {
4421 	struct intel_display *display = to_intel_display(state);
4422 	const struct intel_dpll_mgr *dpll_mgr = display->dpll.mgr;
4423 
4424 	if (drm_WARN_ON(display->drm, !dpll_mgr))
4425 		return -EINVAL;
4426 
4427 	return dpll_mgr->get_dplls(state, crtc, encoder);
4428 }
4429 
4430 /**
4431  * intel_dpll_release - end use of DPLLs by CRTC in atomic state
4432  * @state: atomic state
4433  * @crtc: crtc from which the DPLLs are to be released
4434  *
4435  * This function releases all DPLLs reserved by intel_dpll_reserve()
4436  * from the current atomic commit @state and the old @crtc atomic state.
4437  *
4438  * The new configuration in the atomic commit @state is made effective by
4439  * calling intel_dpll_swap_state().
4440  */
4441 void intel_dpll_release(struct intel_atomic_state *state,
4442 			struct intel_crtc *crtc)
4443 {
4444 	struct intel_display *display = to_intel_display(state);
4445 	const struct intel_dpll_mgr *dpll_mgr = display->dpll.mgr;
4446 
4447 	/*
4448 	 * FIXME: this function is called for every platform having a
4449 	 * compute_clock hook, even though the platform doesn't yet support
4450 	 * the DPLL framework and intel_dpll_reserve() is not
4451 	 * called on those.
4452 	 */
4453 	if (!dpll_mgr)
4454 		return;
4455 
4456 	dpll_mgr->put_dplls(state, crtc);
4457 }
4458 
4459 /**
4460  * intel_dpll_update_active - update the active DPLL for a CRTC/encoder
4461  * @state: atomic state
4462  * @crtc: the CRTC for which to update the active DPLL
4463  * @encoder: encoder determining the type of port DPLL
4464  *
4465  * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
4466  * from the port DPLLs reserved previously by intel_dpll_reserve(). The
4467  * DPLL selected will be based on the current mode of the encoder's port.
4468  */
4469 void intel_dpll_update_active(struct intel_atomic_state *state,
4470 			      struct intel_crtc *crtc,
4471 			      struct intel_encoder *encoder)
4472 {
4473 	struct intel_display *display = to_intel_display(encoder);
4474 	const struct intel_dpll_mgr *dpll_mgr = display->dpll.mgr;
4475 
4476 	if (drm_WARN_ON(display->drm, !dpll_mgr))
4477 		return;
4478 
4479 	dpll_mgr->update_active_dpll(state, crtc, encoder);
4480 }
4481 
4482 /**
4483  * intel_dpll_get_freq - calculate the DPLL's output frequency
4484  * @display: intel_display device
4485  * @pll: DPLL for which to calculate the output frequency
4486  * @dpll_hw_state: DPLL state from which to calculate the output frequency
4487  *
4488  * Return the output frequency corresponding to @pll's passed in @dpll_hw_state.
4489  */
4490 int intel_dpll_get_freq(struct intel_display *display,
4491 			const struct intel_dpll *pll,
4492 			const struct intel_dpll_hw_state *dpll_hw_state)
4493 {
4494 	if (drm_WARN_ON(display->drm, !pll->info->funcs->get_freq))
4495 		return 0;
4496 
4497 	return pll->info->funcs->get_freq(display, pll, dpll_hw_state);
4498 }
4499 
4500 /**
4501  * intel_dpll_get_hw_state - readout the DPLL's hardware state
4502  * @display: intel_display device instance
4503  * @pll: DPLL for which to calculate the output frequency
4504  * @dpll_hw_state: DPLL's hardware state
4505  *
4506  * Read out @pll's hardware state into @dpll_hw_state.
4507  */
4508 bool intel_dpll_get_hw_state(struct intel_display *display,
4509 			     struct intel_dpll *pll,
4510 			     struct intel_dpll_hw_state *dpll_hw_state)
4511 {
4512 	return pll->info->funcs->get_hw_state(display, pll, dpll_hw_state);
4513 }
4514 
/*
 * Read out the hardware state of @pll and rebuild the software tracking
 * (on/off, pipe_mask, active_mask) from the current crtc states.
 */
static void readout_dpll_hw_state(struct intel_display *display,
				  struct intel_dpll *pll)
{
	struct intel_crtc *crtc;

	pll->on = intel_dpll_get_hw_state(display, pll, &pll->state.hw_state);

	/* An already-enabled PLL with a power domain must keep a power ref. */
	if (pll->on && pll->info->power_domain)
		pll->wakeref = intel_display_power_get(display, pll->info->power_domain);

	/* Rebuild the pipe mask from the crtcs currently using this PLL. */
	pll->state.pipe_mask = 0;
	for_each_intel_crtc(display->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		if (crtc_state->hw.active && crtc_state->intel_dpll == pll)
			intel_dpll_crtc_get(crtc, pll, &pll->state);
	}
	pll->active_mask = pll->state.pipe_mask;

	drm_dbg_kms(display->drm,
		    "%s hw state readout: pipe_mask 0x%x, on %i\n",
		    pll->info->name, pll->state.pipe_mask, pll->on);
}
4539 
4540 void intel_dpll_update_ref_clks(struct intel_display *display)
4541 {
4542 	if (display->dpll.mgr && display->dpll.mgr->update_ref_clks)
4543 		display->dpll.mgr->update_ref_clks(display);
4544 }
4545 
4546 void intel_dpll_readout_hw_state(struct intel_display *display)
4547 {
4548 	struct intel_dpll *pll;
4549 	int i;
4550 
4551 	for_each_dpll(display, pll, i)
4552 		readout_dpll_hw_state(display, pll);
4553 }
4554 
/*
 * Sanitize a single PLL after state readout: apply the ADL-P CMTG
 * workaround to any enabled PLL, and disable PLLs that are on but not
 * used by any pipe.
 */
static void sanitize_dpll_state(struct intel_display *display,
				struct intel_dpll *pll)
{
	if (!pll->on)
		return;

	/* Applied to every enabled PLL, even ones that remain in use. */
	adlp_cmtg_clock_gating_wa(display, pll);

	if (pll->active_mask)
		return;

	drm_dbg_kms(display->drm,
		    "%s enabled but not in use, disabling\n",
		    pll->info->name);

	_intel_disable_shared_dpll(display, pll);
}
4572 
4573 void intel_dpll_sanitize_state(struct intel_display *display)
4574 {
4575 	struct intel_dpll *pll;
4576 	int i;
4577 
4578 	intel_cx0_pll_power_save_wa(display);
4579 
4580 	for_each_dpll(display, pll, i)
4581 		sanitize_dpll_state(display, pll);
4582 }
4583 
4584 /**
4585  * intel_dpll_dump_hw_state - dump hw_state
4586  * @display: intel_display structure
4587  * @p: where to print the state to
4588  * @dpll_hw_state: hw state to be dumped
4589  *
4590  * Dumo out the relevant values in @dpll_hw_state.
4591  */
4592 void intel_dpll_dump_hw_state(struct intel_display *display,
4593 			      struct drm_printer *p,
4594 			      const struct intel_dpll_hw_state *dpll_hw_state)
4595 {
4596 	if (display->dpll.mgr) {
4597 		display->dpll.mgr->dump_hw_state(p, dpll_hw_state);
4598 	} else {
4599 		/* fallback for platforms that don't use the shared dpll
4600 		 * infrastructure
4601 		 */
4602 		ibx_dump_hw_state(p, dpll_hw_state);
4603 	}
4604 }
4605 
4606 /**
4607  * intel_dpll_compare_hw_state - compare the two states
4608  * @display: intel_display structure
4609  * @a: first DPLL hw state
4610  * @b: second DPLL hw state
4611  *
4612  * Compare DPLL hw states @a and @b.
4613  *
4614  * Returns: true if the states are equal, false if the differ
4615  */
4616 bool intel_dpll_compare_hw_state(struct intel_display *display,
4617 				 const struct intel_dpll_hw_state *a,
4618 				 const struct intel_dpll_hw_state *b)
4619 {
4620 	if (display->dpll.mgr) {
4621 		return display->dpll.mgr->compare_hw_state(a, b);
4622 	} else {
4623 		/* fallback for platforms that don't use the shared dpll
4624 		 * infrastructure
4625 		 */
4626 		return ibx_compare_hw_state(a, b);
4627 	}
4628 }
4629 
/*
 * Cross-check the sw tracking of @pll (on, active_mask, pipe_mask, cached
 * hw state) against the state read back from hardware. With a NULL @crtc
 * only the global bookkeeping is checked; otherwise the per-pipe masks are
 * verified against @new_crtc_state.
 */
static void
verify_single_dpll_state(struct intel_display *display,
			 struct intel_dpll *pll,
			 struct intel_crtc *crtc,
			 const struct intel_crtc_state *new_crtc_state)
{
	struct intel_dpll_hw_state dpll_hw_state = {};
	u8 pipe_mask;
	bool active;

	active = intel_dpll_get_hw_state(display, pll, &dpll_hw_state);

	/* Always-on PLLs are exempt from the on/active consistency checks. */
	if (!pll->info->always_on) {
		INTEL_DISPLAY_STATE_WARN(display, !pll->on && pll->active_mask,
					 "%s: pll in active use but not on in sw tracking\n",
					 pll->info->name);
		INTEL_DISPLAY_STATE_WARN(display, pll->on && !pll->active_mask,
					 "%s: pll is on but not used by any active pipe\n",
					 pll->info->name);
		INTEL_DISPLAY_STATE_WARN(display, pll->on != active,
					 "%s: pll on state mismatch (expected %i, found %i)\n",
					 pll->info->name, pll->on, active);
	}

	/* Without a crtc, only check the global active vs. reference masks. */
	if (!crtc) {
		INTEL_DISPLAY_STATE_WARN(display,
					 pll->active_mask & ~pll->state.pipe_mask,
					 "%s: more active pll users than references: 0x%x vs 0x%x\n",
					 pll->info->name, pll->active_mask, pll->state.pipe_mask);

		return;
	}

	pipe_mask = BIT(crtc->pipe);

	if (new_crtc_state->hw.active)
		INTEL_DISPLAY_STATE_WARN(display, !(pll->active_mask & pipe_mask),
					 "%s: pll active mismatch (expected pipe %c in active mask 0x%x)\n",
					 pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
	else
		INTEL_DISPLAY_STATE_WARN(display, pll->active_mask & pipe_mask,
					 "%s: pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
					 pll->info->name, pipe_name(crtc->pipe), pll->active_mask);

	INTEL_DISPLAY_STATE_WARN(display, !(pll->state.pipe_mask & pipe_mask),
				 "%s: pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
				 pll->info->name, pipe_mask, pll->state.pipe_mask);

	/* The cached sw hw_state must match what the hardware reports. */
	INTEL_DISPLAY_STATE_WARN(display,
				 pll->on && memcmp(&pll->state.hw_state, &dpll_hw_state,
						   sizeof(dpll_hw_state)),
				 "%s: pll hw state mismatch\n",
				 pll->info->name);
}
4684 
4685 static bool has_alt_port_dpll(const struct intel_dpll *old_pll,
4686 			      const struct intel_dpll *new_pll)
4687 {
4688 	return old_pll && new_pll && old_pll != new_pll &&
4689 		(old_pll->info->is_alt_port_dpll || new_pll->info->is_alt_port_dpll);
4690 }
4691 
/*
 * Verify the DPLL sw/hw state for @crtc after a commit: the new PLL (if
 * any) must reference @crtc, and a replaced old PLL must no longer.
 */
void intel_dpll_state_verify(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (new_crtc_state->intel_dpll)
		verify_single_dpll_state(display, new_crtc_state->intel_dpll,
					 crtc, new_crtc_state);

	/* A PLL the crtc stopped using must have dropped its references. */
	if (old_crtc_state->intel_dpll &&
	    old_crtc_state->intel_dpll != new_crtc_state->intel_dpll) {
		u8 pipe_mask = BIT(crtc->pipe);
		struct intel_dpll *pll = old_crtc_state->intel_dpll;

		INTEL_DISPLAY_STATE_WARN(display, pll->active_mask & pipe_mask,
					 "%s: pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
					 pll->info->name, pipe_name(crtc->pipe), pll->active_mask);

		/* TC ports have both MG/TC and TBT PLL referenced simultaneously */
		INTEL_DISPLAY_STATE_WARN(display, !has_alt_port_dpll(old_crtc_state->intel_dpll,
								     new_crtc_state->intel_dpll) &&
					 pll->state.pipe_mask & pipe_mask,
					 "%s: pll enabled crtcs mismatch (found pipe %c in enabled mask (0x%x))\n",
					 pll->info->name, pipe_name(crtc->pipe), pll->state.pipe_mask);
	}
}
4722 
4723 void intel_dpll_verify_disabled(struct intel_atomic_state *state)
4724 {
4725 	struct intel_display *display = to_intel_display(state);
4726 	struct intel_dpll *pll;
4727 	int i;
4728 
4729 	for_each_dpll(display, pll, i)
4730 		verify_single_dpll_state(display, pll, NULL, NULL);
4731 }
4732