xref: /linux/drivers/gpu/drm/i915/display/intel_dpll_mgr.c (revision fd7d598270724cc787982ea48bbe17ad383a8b7f)
1 /*
2  * Copyright © 2006-2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <linux/math.h>
25 #include <linux/string_helpers.h>
26 
27 #include "i915_reg.h"
28 #include "intel_de.h"
29 #include "intel_display_types.h"
30 #include "intel_dkl_phy.h"
31 #include "intel_dkl_phy_regs.h"
32 #include "intel_dpio_phy.h"
33 #include "intel_dpll.h"
34 #include "intel_dpll_mgr.h"
35 #include "intel_hti.h"
36 #include "intel_mg_phy_regs.h"
37 #include "intel_pch_refclk.h"
38 #include "intel_tc.h"
39 
40 /**
41  * DOC: Display PLLs
42  *
43  * Display PLLs used for driving outputs vary by platform. While some have
44  * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
45  * from a pool. In the latter scenario, it is possible that multiple pipes
46  * share a PLL if their configurations match.
47  *
48  * This file provides an abstraction over display PLLs. The function
49  * intel_shared_dpll_init() initializes the PLLs for the given platform.  The
50  * users of a PLL are tracked and that tracking is integrated with the atomic
51  * modeset interface. During an atomic operation, required PLLs can be reserved
52  * for a given CRTC and encoder configuration by calling
53  * intel_reserve_shared_dplls() and previously reserved PLLs can be released
54  * with intel_release_shared_dplls().
55  * Changes to the users are first staged in the atomic state, and then made
56  * effective by calling intel_shared_dpll_swap_state() during the atomic
57  * commit phase.
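 *
 * A rough sketch of the flow (illustrative only; argument lists are
 * approximate and the real call sites live in the atomic check and commit
 * code):
 *
 *	intel_shared_dpll_init(i915);				<- driver init
 *	intel_reserve_shared_dplls(state, crtc, encoder);	<- atomic check
 *	intel_release_shared_dplls(state, crtc);		<- atomic check
 *	intel_shared_dpll_swap_state(state);			<- atomic commit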
58  */
59 
60 /* platform specific hooks for managing DPLLs */
61 struct intel_shared_dpll_funcs {
62 	/*
63 	 * Hook for enabling the pll, called from intel_enable_shared_dpll() if
64 	 * the pll is not already enabled.
65 	 */
66 	void (*enable)(struct drm_i915_private *i915,
67 		       struct intel_shared_dpll *pll);
68 
69 	/*
70 	 * Hook for disabling the pll, called from intel_disable_shared_dpll()
71 	 * only when it is safe to disable the pll, i.e., there are no more
72 	 * tracked users for it.
73 	 */
74 	void (*disable)(struct drm_i915_private *i915,
75 			struct intel_shared_dpll *pll);
76 
77 	/*
78 	 * Hook for reading the values currently programmed to the DPLL
79 	 * registers. This is used for initial hw state readout and state
80 	 * verification after a mode set.
81 	 */
82 	bool (*get_hw_state)(struct drm_i915_private *i915,
83 			     struct intel_shared_dpll *pll,
84 			     struct intel_dpll_hw_state *hw_state);
85 
86 	/*
87 	 * Hook for calculating the pll's output frequency based on the
88 	 * passed-in state.
89 	 */
90 	int (*get_freq)(struct drm_i915_private *i915,
91 			const struct intel_shared_dpll *pll,
92 			const struct intel_dpll_hw_state *pll_state);
93 };
94 
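/*
 * Platform specific hooks for selecting and managing the DPLL pool as a
 * whole, as opposed to the per-PLL hooks in struct intel_shared_dpll_funcs
 * above.
 */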
95 struct intel_dpll_mgr {
96 	const struct dpll_info *dpll_info;
97 
98 	int (*compute_dplls)(struct intel_atomic_state *state,
99 			     struct intel_crtc *crtc,
100 			     struct intel_encoder *encoder);
101 	int (*get_dplls)(struct intel_atomic_state *state,
102 			 struct intel_crtc *crtc,
103 			 struct intel_encoder *encoder);
104 	void (*put_dplls)(struct intel_atomic_state *state,
105 			  struct intel_crtc *crtc);
106 	void (*update_active_dpll)(struct intel_atomic_state *state,
107 				   struct intel_crtc *crtc,
108 				   struct intel_encoder *encoder);
109 	void (*update_ref_clks)(struct drm_i915_private *i915);
110 	void (*dump_hw_state)(struct drm_i915_private *i915,
111 			      const struct intel_dpll_hw_state *hw_state);
112 };
113 
114 static void
115 intel_atomic_duplicate_dpll_state(struct drm_i915_private *i915,
116 				  struct intel_shared_dpll_state *shared_dpll)
117 {
118 	struct intel_shared_dpll *pll;
119 	int i;
120 
121 	/* Copy shared dpll state */
122 	for_each_shared_dpll(i915, pll, i)
123 		shared_dpll[pll->index] = pll->state;
124 }
125 
126 static struct intel_shared_dpll_state *
127 intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
128 {
129 	struct intel_atomic_state *state = to_intel_atomic_state(s);
130 
131 	drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
132 
133 	if (!state->dpll_set) {
134 		state->dpll_set = true;
135 
136 		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
137 						  state->shared_dpll);
138 	}
139 
140 	return state->shared_dpll;
141 }
142 
143 /**
144  * intel_get_shared_dpll_by_id - get a DPLL given its id
145  * @i915: i915 device instance
146  * @id: pll id
147  *
148  * Returns:
149  * A pointer to the DPLL with @id, or NULL if no such DPLL exists.
150  */
151 struct intel_shared_dpll *
152 intel_get_shared_dpll_by_id(struct drm_i915_private *i915,
153 			    enum intel_dpll_id id)
154 {
155 	struct intel_shared_dpll *pll;
156 	int i;
157 
158 	for_each_shared_dpll(i915, pll, i) {
159 		if (pll->info->id == id)
160 			return pll;
161 	}
162 
163 	MISSING_CASE(id);
164 	return NULL;
165 }
166 
167 /* For ILK+ */
168 void assert_shared_dpll(struct drm_i915_private *i915,
169 			struct intel_shared_dpll *pll,
170 			bool state)
171 {
172 	bool cur_state;
173 	struct intel_dpll_hw_state hw_state;
174 
175 	if (drm_WARN(&i915->drm, !pll,
176 		     "asserting DPLL %s with no DPLL\n", str_on_off(state)))
177 		return;
178 
179 	cur_state = intel_dpll_get_hw_state(i915, pll, &hw_state);
180 	I915_STATE_WARN(i915, cur_state != state,
181 			"%s assertion failure (expected %s, current %s)\n",
182 			pll->info->name, str_on_off(state),
183 			str_on_off(cur_state));
184 }
185 
186 static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
187 {
188 	return TC_PORT_1 + id - DPLL_ID_ICL_MGPLL1;
189 }
190 
191 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
192 {
193 	return tc_port - TC_PORT_1 + DPLL_ID_ICL_MGPLL1;
194 }
195 
196 static i915_reg_t
197 intel_combo_pll_enable_reg(struct drm_i915_private *i915,
198 			   struct intel_shared_dpll *pll)
199 {
200 	if (IS_DG1(i915))
201 		return DG1_DPLL_ENABLE(pll->info->id);
202 	else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
203 		 (pll->info->id == DPLL_ID_EHL_DPLL4))
204 		return MG_PLL_ENABLE(0);
205 
206 	return ICL_DPLL_ENABLE(pll->info->id);
207 }
208 
209 static i915_reg_t
210 intel_tc_pll_enable_reg(struct drm_i915_private *i915,
211 			struct intel_shared_dpll *pll)
212 {
213 	const enum intel_dpll_id id = pll->info->id;
214 	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
215 
216 	if (IS_ALDERLAKE_P(i915))
217 		return ADLP_PORTTC_PLL_ENABLE(tc_port);
218 
219 	return MG_PLL_ENABLE(tc_port);
220 }
221 
222 /**
223  * intel_enable_shared_dpll - enable a CRTC's shared DPLL
224  * @crtc_state: CRTC, and its state, which has a shared DPLL
225  *
226  * Enable the shared DPLL used by the CRTC in @crtc_state.
227  */
228 void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
229 {
230 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
231 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
232 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
233 	unsigned int pipe_mask = BIT(crtc->pipe);
234 	unsigned int old_mask;
235 
236 	if (drm_WARN_ON(&i915->drm, pll == NULL))
237 		return;
238 
239 	mutex_lock(&i915->display.dpll.lock);
240 	old_mask = pll->active_mask;
241 
242 	if (drm_WARN_ON(&i915->drm, !(pll->state.pipe_mask & pipe_mask)) ||
243 	    drm_WARN_ON(&i915->drm, pll->active_mask & pipe_mask))
244 		goto out;
245 
246 	pll->active_mask |= pipe_mask;
247 
248 	drm_dbg_kms(&i915->drm,
249 		    "enable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
250 		    pll->info->name, pll->active_mask, pll->on,
251 		    crtc->base.base.id, crtc->base.name);
252 
253 	if (old_mask) {
254 		drm_WARN_ON(&i915->drm, !pll->on);
255 		assert_shared_dpll_enabled(i915, pll);
256 		goto out;
257 	}
258 	drm_WARN_ON(&i915->drm, pll->on);
259 
260 	drm_dbg_kms(&i915->drm, "enabling %s\n", pll->info->name);
261 	pll->info->funcs->enable(i915, pll);
262 	pll->on = true;
263 
264 out:
265 	mutex_unlock(&i915->display.dpll.lock);
266 }
267 
268 /**
269  * intel_disable_shared_dpll - disable a CRTC's shared DPLL
270  * @crtc_state: CRTC, and its state, which has a shared DPLL
271  *
272  * Disable the shared DPLL used by the CRTC in @crtc_state.
273  */
274 void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
275 {
276 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
277 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
278 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
279 	unsigned int pipe_mask = BIT(crtc->pipe);
280 
281 	/* PCH only available on ILK+ */
282 	if (DISPLAY_VER(i915) < 5)
283 		return;
284 
285 	if (pll == NULL)
286 		return;
287 
288 	mutex_lock(&i915->display.dpll.lock);
289 	if (drm_WARN(&i915->drm, !(pll->active_mask & pipe_mask),
290 		     "%s not used by [CRTC:%d:%s]\n", pll->info->name,
291 		     crtc->base.base.id, crtc->base.name))
292 		goto out;
293 
294 	drm_dbg_kms(&i915->drm,
295 		    "disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
296 		    pll->info->name, pll->active_mask, pll->on,
297 		    crtc->base.base.id, crtc->base.name);
298 
299 	assert_shared_dpll_enabled(i915, pll);
300 	drm_WARN_ON(&i915->drm, !pll->on);
301 
302 	pll->active_mask &= ~pipe_mask;
303 	if (pll->active_mask)
304 		goto out;
305 
306 	drm_dbg_kms(&i915->drm, "disabling %s\n", pll->info->name);
307 	pll->info->funcs->disable(i915, pll);
308 	pll->on = false;
309 
310 out:
311 	mutex_unlock(&i915->display.dpll.lock);
312 }
313 
314 static unsigned long
315 intel_dpll_mask_all(struct drm_i915_private *i915)
316 {
317 	struct intel_shared_dpll *pll;
318 	unsigned long dpll_mask = 0;
319 	int i;
320 
321 	for_each_shared_dpll(i915, pll, i) {
322 		drm_WARN_ON(&i915->drm, dpll_mask & BIT(pll->info->id));
323 
324 		dpll_mask |= BIT(pll->info->id);
325 	}
326 
327 	return dpll_mask;
328 }
329 
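/*
 * Look for a DPLL from @dpll_mask that some pipe already uses with a hw
 * state identical to @pll_state (so it can be shared), otherwise fall back
 * to the first currently unused PLL.  Returns NULL if neither exists.
 */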
330 static struct intel_shared_dpll *
331 intel_find_shared_dpll(struct intel_atomic_state *state,
332 		       const struct intel_crtc *crtc,
333 		       const struct intel_dpll_hw_state *pll_state,
334 		       unsigned long dpll_mask)
335 {
336 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
337 	unsigned long dpll_mask_all = intel_dpll_mask_all(i915);
338 	struct intel_shared_dpll_state *shared_dpll;
339 	struct intel_shared_dpll *unused_pll = NULL;
340 	enum intel_dpll_id id;
341 
342 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
343 
344 	drm_WARN_ON(&i915->drm, dpll_mask & ~dpll_mask_all);
345 
346 	for_each_set_bit(id, &dpll_mask, fls(dpll_mask_all)) {
347 		struct intel_shared_dpll *pll;
348 
349 		pll = intel_get_shared_dpll_by_id(i915, id);
350 		if (!pll)
351 			continue;
352 
353 		/* First look for a state match among the PLLs already in use */
354 		if (shared_dpll[pll->index].pipe_mask == 0) {
355 			if (!unused_pll)
356 				unused_pll = pll;
357 			continue;
358 		}
359 
360 		if (memcmp(pll_state,
361 			   &shared_dpll[pll->index].hw_state,
362 			   sizeof(*pll_state)) == 0) {
363 			drm_dbg_kms(&i915->drm,
364 				    "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
365 				    crtc->base.base.id, crtc->base.name,
366 				    pll->info->name,
367 				    shared_dpll[pll->index].pipe_mask,
368 				    pll->active_mask);
369 			return pll;
370 		}
371 	}
372 
373 	/* Ok no matching timings, maybe there's a free one? */
374 	if (unused_pll) {
375 		drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] allocated %s\n",
376 			    crtc->base.base.id, crtc->base.name,
377 			    unused_pll->info->name);
378 		return unused_pll;
379 	}
380 
381 	return NULL;
382 }
383 
384 /**
385  * intel_reference_shared_dpll_crtc - Get a DPLL reference for a CRTC
386  * @crtc: CRTC on whose behalf the reference is taken
387  * @pll: DPLL for which the reference is taken
388  * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
389  *
390  * Take a reference for @pll tracking the use of it by @crtc.
391  */
392 static void
393 intel_reference_shared_dpll_crtc(const struct intel_crtc *crtc,
394 				 const struct intel_shared_dpll *pll,
395 				 struct intel_shared_dpll_state *shared_dpll_state)
396 {
397 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
398 
399 	drm_WARN_ON(&i915->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) != 0);
400 
401 	shared_dpll_state->pipe_mask |= BIT(crtc->pipe);
402 
403 	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] reserving %s\n",
404 		    crtc->base.base.id, crtc->base.name, pll->info->name);
405 }
406 
407 static void
408 intel_reference_shared_dpll(struct intel_atomic_state *state,
409 			    const struct intel_crtc *crtc,
410 			    const struct intel_shared_dpll *pll,
411 			    const struct intel_dpll_hw_state *pll_state)
412 {
413 	struct intel_shared_dpll_state *shared_dpll;
414 
415 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
416 
417 	if (shared_dpll[pll->index].pipe_mask == 0)
418 		shared_dpll[pll->index].hw_state = *pll_state;
419 
420 	intel_reference_shared_dpll_crtc(crtc, pll, &shared_dpll[pll->index]);
421 }
422 
423 /**
424  * intel_unreference_shared_dpll_crtc - Drop a DPLL reference for a CRTC
425  * @crtc: CRTC on whose behalf the reference is dropped
426  * @pll: DPLL for which the reference is dropped
427  * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
428  *
429  * Drop a reference for @pll tracking the end of use of it by @crtc.
430  */
431 void
432 intel_unreference_shared_dpll_crtc(const struct intel_crtc *crtc,
433 				   const struct intel_shared_dpll *pll,
434 				   struct intel_shared_dpll_state *shared_dpll_state)
435 {
436 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
437 
438 	drm_WARN_ON(&i915->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) == 0);
439 
440 	shared_dpll_state->pipe_mask &= ~BIT(crtc->pipe);
441 
442 	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] releasing %s\n",
443 		    crtc->base.base.id, crtc->base.name, pll->info->name);
444 }
445 
446 static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
447 					  const struct intel_crtc *crtc,
448 					  const struct intel_shared_dpll *pll)
449 {
450 	struct intel_shared_dpll_state *shared_dpll;
451 
452 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
453 
454 	intel_unreference_shared_dpll_crtc(crtc, pll, &shared_dpll[pll->index]);
455 }
456 
457 static void intel_put_dpll(struct intel_atomic_state *state,
458 			   struct intel_crtc *crtc)
459 {
460 	const struct intel_crtc_state *old_crtc_state =
461 		intel_atomic_get_old_crtc_state(state, crtc);
462 	struct intel_crtc_state *new_crtc_state =
463 		intel_atomic_get_new_crtc_state(state, crtc);
464 
465 	new_crtc_state->shared_dpll = NULL;
466 
467 	if (!old_crtc_state->shared_dpll)
468 		return;
469 
470 	intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
471 }
472 
473 /**
474  * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
475  * @state: atomic state
476  *
477  * This is the dpll version of drm_atomic_helper_swap_state() since the
478  * helper does not handle driver-specific global state.
479  *
480  * For consistency with atomic helpers this function does a complete swap,
481  * i.e. it also puts the current state into @state, even though there is no
482  * need for that at this moment.
483  */
484 void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
485 {
486 	struct drm_i915_private *i915 = to_i915(state->base.dev);
487 	struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
488 	struct intel_shared_dpll *pll;
489 	int i;
490 
491 	if (!state->dpll_set)
492 		return;
493 
494 	for_each_shared_dpll(i915, pll, i)
495 		swap(pll->state, shared_dpll[pll->index]);
496 }
497 
498 static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *i915,
499 				      struct intel_shared_dpll *pll,
500 				      struct intel_dpll_hw_state *hw_state)
501 {
502 	const enum intel_dpll_id id = pll->info->id;
503 	intel_wakeref_t wakeref;
504 	u32 val;
505 
506 	wakeref = intel_display_power_get_if_enabled(i915,
507 						     POWER_DOMAIN_DISPLAY_CORE);
508 	if (!wakeref)
509 		return false;
510 
511 	val = intel_de_read(i915, PCH_DPLL(id));
512 	hw_state->dpll = val;
513 	hw_state->fp0 = intel_de_read(i915, PCH_FP0(id));
514 	hw_state->fp1 = intel_de_read(i915, PCH_FP1(id));
515 
516 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
517 
518 	return val & DPLL_VCO_ENABLE;
519 }
520 
521 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *i915)
522 {
523 	u32 val;
524 	bool enabled;
525 
526 	val = intel_de_read(i915, PCH_DREF_CONTROL);
527 	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
528 			    DREF_SUPERSPREAD_SOURCE_MASK));
529 	I915_STATE_WARN(i915, !enabled,
530 			"PCH refclk assertion failure, should be active but is disabled\n");
531 }
532 
533 static void ibx_pch_dpll_enable(struct drm_i915_private *i915,
534 				struct intel_shared_dpll *pll)
535 {
536 	const enum intel_dpll_id id = pll->info->id;
537 
538 	/* PCH refclock must be enabled first */
539 	ibx_assert_pch_refclk_enabled(i915);
540 
541 	intel_de_write(i915, PCH_FP0(id), pll->state.hw_state.fp0);
542 	intel_de_write(i915, PCH_FP1(id), pll->state.hw_state.fp1);
543 
544 	intel_de_write(i915, PCH_DPLL(id), pll->state.hw_state.dpll);
545 
546 	/* Wait for the clocks to stabilize. */
547 	intel_de_posting_read(i915, PCH_DPLL(id));
548 	udelay(150);
549 
550 	/* The pixel multiplier can only be updated once the
551 	 * DPLL is enabled and the clocks are stable.
552 	 *
553 	 * So write it again.
554 	 */
555 	intel_de_write(i915, PCH_DPLL(id), pll->state.hw_state.dpll);
556 	intel_de_posting_read(i915, PCH_DPLL(id));
557 	udelay(200);
558 }
559 
560 static void ibx_pch_dpll_disable(struct drm_i915_private *i915,
561 				 struct intel_shared_dpll *pll)
562 {
563 	const enum intel_dpll_id id = pll->info->id;
564 
565 	intel_de_write(i915, PCH_DPLL(id), 0);
566 	intel_de_posting_read(i915, PCH_DPLL(id));
567 	udelay(200);
568 }
569 
570 static int ibx_compute_dpll(struct intel_atomic_state *state,
571 			    struct intel_crtc *crtc,
572 			    struct intel_encoder *encoder)
573 {
574 	return 0;
575 }
576 
577 static int ibx_get_dpll(struct intel_atomic_state *state,
578 			struct intel_crtc *crtc,
579 			struct intel_encoder *encoder)
580 {
581 	struct intel_crtc_state *crtc_state =
582 		intel_atomic_get_new_crtc_state(state, crtc);
583 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
584 	struct intel_shared_dpll *pll;
585 	enum intel_dpll_id id;
586 
587 	if (HAS_PCH_IBX(i915)) {
588 		/* Ironlake PCH has a fixed pipe -> PCH PLL mapping. */
589 		id = (enum intel_dpll_id) crtc->pipe;
590 		pll = intel_get_shared_dpll_by_id(i915, id);
591 
592 		drm_dbg_kms(&i915->drm,
593 			    "[CRTC:%d:%s] using pre-allocated %s\n",
594 			    crtc->base.base.id, crtc->base.name,
595 			    pll->info->name);
596 	} else {
597 		pll = intel_find_shared_dpll(state, crtc,
598 					     &crtc_state->dpll_hw_state,
599 					     BIT(DPLL_ID_PCH_PLL_B) |
600 					     BIT(DPLL_ID_PCH_PLL_A));
601 	}
602 
603 	if (!pll)
604 		return -EINVAL;
605 
606 	/* reference the pll */
607 	intel_reference_shared_dpll(state, crtc,
608 				    pll, &crtc_state->dpll_hw_state);
609 
610 	crtc_state->shared_dpll = pll;
611 
612 	return 0;
613 }
614 
615 static void ibx_dump_hw_state(struct drm_i915_private *i915,
616 			      const struct intel_dpll_hw_state *hw_state)
617 {
618 	drm_dbg_kms(&i915->drm,
619 		    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
620 		    "fp0: 0x%x, fp1: 0x%x\n",
621 		    hw_state->dpll,
622 		    hw_state->dpll_md,
623 		    hw_state->fp0,
624 		    hw_state->fp1);
625 }
626 
627 static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
628 	.enable = ibx_pch_dpll_enable,
629 	.disable = ibx_pch_dpll_disable,
630 	.get_hw_state = ibx_pch_dpll_get_hw_state,
631 };
632 
633 static const struct dpll_info pch_plls[] = {
634 	{ "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
635 	{ "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
636 	{ },
637 };
638 
639 static const struct intel_dpll_mgr pch_pll_mgr = {
640 	.dpll_info = pch_plls,
641 	.compute_dplls = ibx_compute_dpll,
642 	.get_dplls = ibx_get_dpll,
643 	.put_dplls = intel_put_dpll,
644 	.dump_hw_state = ibx_dump_hw_state,
645 };
646 
647 static void hsw_ddi_wrpll_enable(struct drm_i915_private *i915,
648 				 struct intel_shared_dpll *pll)
649 {
650 	const enum intel_dpll_id id = pll->info->id;
651 
652 	intel_de_write(i915, WRPLL_CTL(id), pll->state.hw_state.wrpll);
653 	intel_de_posting_read(i915, WRPLL_CTL(id));
654 	udelay(20);
655 }
656 
657 static void hsw_ddi_spll_enable(struct drm_i915_private *i915,
658 				struct intel_shared_dpll *pll)
659 {
660 	intel_de_write(i915, SPLL_CTL, pll->state.hw_state.spll);
661 	intel_de_posting_read(i915, SPLL_CTL);
662 	udelay(20);
663 }
664 
665 static void hsw_ddi_wrpll_disable(struct drm_i915_private *i915,
666 				  struct intel_shared_dpll *pll)
667 {
668 	const enum intel_dpll_id id = pll->info->id;
669 
670 	intel_de_rmw(i915, WRPLL_CTL(id), WRPLL_PLL_ENABLE, 0);
671 	intel_de_posting_read(i915, WRPLL_CTL(id));
672 
673 	/*
674 	 * Try to set up the PCH reference clock once all DPLLs
675 	 * that depend on it have been shut down.
676 	 */
677 	if (i915->display.dpll.pch_ssc_use & BIT(id))
678 		intel_init_pch_refclk(i915);
679 }
680 
681 static void hsw_ddi_spll_disable(struct drm_i915_private *i915,
682 				 struct intel_shared_dpll *pll)
683 {
684 	enum intel_dpll_id id = pll->info->id;
685 
686 	intel_de_rmw(i915, SPLL_CTL, SPLL_PLL_ENABLE, 0);
687 	intel_de_posting_read(i915, SPLL_CTL);
688 
689 	/*
690 	 * Try to set up the PCH reference clock once all DPLLs
691 	 * that depend on it have been shut down.
692 	 */
693 	if (i915->display.dpll.pch_ssc_use & BIT(id))
694 		intel_init_pch_refclk(i915);
695 }
696 
697 static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *i915,
698 				       struct intel_shared_dpll *pll,
699 				       struct intel_dpll_hw_state *hw_state)
700 {
701 	const enum intel_dpll_id id = pll->info->id;
702 	intel_wakeref_t wakeref;
703 	u32 val;
704 
705 	wakeref = intel_display_power_get_if_enabled(i915,
706 						     POWER_DOMAIN_DISPLAY_CORE);
707 	if (!wakeref)
708 		return false;
709 
710 	val = intel_de_read(i915, WRPLL_CTL(id));
711 	hw_state->wrpll = val;
712 
713 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
714 
715 	return val & WRPLL_PLL_ENABLE;
716 }
717 
718 static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *i915,
719 				      struct intel_shared_dpll *pll,
720 				      struct intel_dpll_hw_state *hw_state)
721 {
722 	intel_wakeref_t wakeref;
723 	u32 val;
724 
725 	wakeref = intel_display_power_get_if_enabled(i915,
726 						     POWER_DOMAIN_DISPLAY_CORE);
727 	if (!wakeref)
728 		return false;
729 
730 	val = intel_de_read(i915, SPLL_CTL);
731 	hw_state->spll = val;
732 
733 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
734 
735 	return val & SPLL_PLL_ENABLE;
736 }
737 
738 #define LC_FREQ 2700
739 #define LC_FREQ_2K U64_C(LC_FREQ * 2000)
740 
741 #define P_MIN 2
742 #define P_MAX 64
743 #define P_INC 2
744 
745 /* Constraints for good PLL behavior */
746 #define REF_MIN 48
747 #define REF_MAX 400
748 #define VCO_MIN 2400
749 #define VCO_MAX 4800
750 
751 struct hsw_wrpll_rnp {
752 	unsigned p, n2, r2;
753 };
754 
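/*
 * Frequency error budget, in ppm, used by hsw_wrpll_update_rnp() when
 * picking dividers for the given pixel clock (in Hz).  A budget of 0 makes
 * the search simply minimize the error for that well-known clock.
 */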
755 static unsigned hsw_wrpll_get_budget_for_freq(int clock)
756 {
757 	switch (clock) {
758 	case 25175000:
759 	case 25200000:
760 	case 27000000:
761 	case 27027000:
762 	case 37762500:
763 	case 37800000:
764 	case 40500000:
765 	case 40541000:
766 	case 54000000:
767 	case 54054000:
768 	case 59341000:
769 	case 59400000:
770 	case 72000000:
771 	case 74176000:
772 	case 74250000:
773 	case 81000000:
774 	case 81081000:
775 	case 89012000:
776 	case 89100000:
777 	case 108000000:
778 	case 108108000:
779 	case 111264000:
780 	case 111375000:
781 	case 148352000:
782 	case 148500000:
783 	case 162000000:
784 	case 162162000:
785 	case 222525000:
786 	case 222750000:
787 	case 296703000:
788 	case 297000000:
789 		return 0;
790 	case 233500000:
791 	case 245250000:
792 	case 247750000:
793 	case 253250000:
794 	case 298000000:
795 		return 1500;
796 	case 169128000:
797 	case 169500000:
798 	case 179500000:
799 	case 202000000:
800 		return 2000;
801 	case 256250000:
802 	case 262500000:
803 	case 270000000:
804 	case 272500000:
805 	case 273750000:
806 	case 280750000:
807 	case 281250000:
808 	case 286000000:
809 	case 291750000:
810 		return 4000;
811 	case 267250000:
812 	case 268500000:
813 		return 5000;
814 	default:
815 		return 1000;
816 	}
817 }
818 
819 static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
820 				 unsigned int r2, unsigned int n2,
821 				 unsigned int p,
822 				 struct hsw_wrpll_rnp *best)
823 {
824 	u64 a, b, c, d, diff, diff_best;
825 
826 	/* No best (r,n,p) yet */
827 	if (best->p == 0) {
828 		best->p = p;
829 		best->n2 = n2;
830 		best->r2 = r2;
831 		return;
832 	}
833 
834 	/*
835 	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
836 	 * freq2k.
837 	 *
838 	 * delta = 1e6 *
839 	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
840 	 *	   freq2k;
841 	 *
842 	 * and we would like delta <= budget.
843 	 *
844 	 * If the discrepancy is above the PPM-based budget, always prefer to
845 	 * improve upon the previous solution.  However, if you're within the
846 	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
847 	 */
848 	a = freq2k * budget * p * r2;
849 	b = freq2k * budget * best->p * best->r2;
850 	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
851 	diff_best = abs_diff(freq2k * best->p * best->r2,
852 			     LC_FREQ_2K * best->n2);
853 	c = 1000000 * diff;
854 	d = 1000000 * diff_best;
855 
856 	if (a < c && b < d) {
857 		/* If both are above the budget, pick the closer */
858 		if (best->p * best->r2 * diff < p * r2 * diff_best) {
859 			best->p = p;
860 			best->n2 = n2;
861 			best->r2 = r2;
862 		}
863 	} else if (a >= c && b < d) {
864 		/* The new candidate is within the budget but the current best is not: take it. */
865 		best->p = p;
866 		best->n2 = n2;
867 		best->r2 = r2;
868 	} else if (a >= c && b >= d) {
869 		/* Both are below the limit, so pick the higher n2/(r2*r2) */
870 		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
871 			best->p = p;
872 			best->n2 = n2;
873 			best->r2 = r2;
874 		}
875 	}
876 	/* Otherwise a < c && b >= d, do nothing */
877 }
878 
879 static void
880 hsw_ddi_calculate_wrpll(int clock /* in Hz */,
881 			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
882 {
883 	u64 freq2k;
884 	unsigned p, n2, r2;
885 	struct hsw_wrpll_rnp best = {};
886 	unsigned budget;
887 
888 	freq2k = clock / 100;
889 
890 	budget = hsw_wrpll_get_budget_for_freq(clock);
891 
892 	/* Special case handling for a 540 MHz pixel clock: bypass the WR PLL
893 	 * entirely and pass the LC PLL clock through directly. */
894 	if (freq2k == 5400000) {
895 		*n2_out = 2;
896 		*p_out = 1;
897 		*r2_out = 2;
898 		return;
899 	}
900 
901 	/*
902 	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
903 	 * the WR PLL.
904 	 *
905 	 * We want R so that REF_MIN <= Ref <= REF_MAX.
906 	 * Injecting R2 = 2 * R gives:
907 	 *   REF_MAX * r2 > LC_FREQ * 2 and
908 	 *   REF_MIN * r2 < LC_FREQ * 2
909 	 *
910 	 * Which means the desired boundaries for r2 are:
911 	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
912 	 *
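	 *  e.g. with LC_FREQ = 2700, REF_MIN = 48 and REF_MAX = 400 the loop
	 *  below scans r2 = 14..112.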
913 	 */
914 	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
915 	     r2 <= LC_FREQ * 2 / REF_MIN;
916 	     r2++) {
917 
918 		/*
919 		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
920 		 *
921 		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
922 		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
923 		 *   VCO_MAX * r2 > n2 * LC_FREQ and
924 		 *   VCO_MIN * r2 < n2 * LC_FREQ
925 		 *
926 		 * Which means the desired boundaries for n2 are:
927 		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
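		 * e.g. for r2 = 14 (the minimum) the loop below scans n2 = 13..24.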
928 		 */
929 		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
930 		     n2 <= VCO_MAX * r2 / LC_FREQ;
931 		     n2++) {
932 
933 			for (p = P_MIN; p <= P_MAX; p += P_INC)
934 				hsw_wrpll_update_rnp(freq2k, budget,
935 						     r2, n2, p, &best);
936 		}
937 	}
938 
939 	*n2_out = best.n2;
940 	*p_out = best.p;
941 	*r2_out = best.r2;
942 }
943 
944 static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *i915,
945 				  const struct intel_shared_dpll *pll,
946 				  const struct intel_dpll_hw_state *pll_state)
947 {
948 	int refclk;
949 	int n, p, r;
950 	u32 wrpll = pll_state->wrpll;
951 
952 	switch (wrpll & WRPLL_REF_MASK) {
953 	case WRPLL_REF_SPECIAL_HSW:
954 		/* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
955 		if (IS_HASWELL(i915) && !IS_HASWELL_ULT(i915)) {
956 			refclk = i915->display.dpll.ref_clks.nssc;
957 			break;
958 		}
959 		fallthrough;
960 	case WRPLL_REF_PCH_SSC:
961 		/*
962 		 * We could calculate spread here, but our checking
963 		 * code only cares about 5% accuracy, and spread is a max of
964 		 * 0.5% downspread.
965 		 */
966 		refclk = i915->display.dpll.ref_clks.ssc;
967 		break;
968 	case WRPLL_REF_LCPLL:
969 		refclk = 2700000;
970 		break;
971 	default:
972 		MISSING_CASE(wrpll);
973 		return 0;
974 	}
975 
976 	r = wrpll & WRPLL_DIVIDER_REF_MASK;
977 	p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
978 	n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;
979 
980 	/* Convert to KHz, p & r have a fixed point portion */
981 	return (refclk * n / 10) / (p * r) * 2;
982 }
983 
984 static int
985 hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state,
986 			   struct intel_crtc *crtc)
987 {
988 	struct drm_i915_private *i915 = to_i915(state->base.dev);
989 	struct intel_crtc_state *crtc_state =
990 		intel_atomic_get_new_crtc_state(state, crtc);
991 	unsigned int p, n2, r2;
992 
993 	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
994 
995 	crtc_state->dpll_hw_state.wrpll =
996 		WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
997 		WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
998 		WRPLL_DIVIDER_POST(p);
999 
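	/*
	 * Re-derive port_clock from the dividers actually chosen; the PLL
	 * output may not hit the requested rate exactly.
	 */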
1000 	crtc_state->port_clock = hsw_ddi_wrpll_get_freq(i915, NULL,
1001 							&crtc_state->dpll_hw_state);
1002 
1003 	return 0;
1004 }
1005 
1006 static struct intel_shared_dpll *
1007 hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
1008 		       struct intel_crtc *crtc)
1009 {
1010 	struct intel_crtc_state *crtc_state =
1011 		intel_atomic_get_new_crtc_state(state, crtc);
1012 
1013 	return intel_find_shared_dpll(state, crtc,
1014 				      &crtc_state->dpll_hw_state,
1015 				      BIT(DPLL_ID_WRPLL2) |
1016 				      BIT(DPLL_ID_WRPLL1));
1017 }
1018 
1019 static int
1020 hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state)
1021 {
1022 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1023 	int clock = crtc_state->port_clock;
1024 
1025 	switch (clock / 2) {
1026 	case 81000:
1027 	case 135000:
1028 	case 270000:
1029 		return 0;
1030 	default:
1031 		drm_dbg_kms(&i915->drm, "Invalid clock for DP: %d\n",
1032 			    clock);
1033 		return -EINVAL;
1034 	}
1035 }
1036 
1037 static struct intel_shared_dpll *
1038 hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
1039 {
1040 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1041 	struct intel_shared_dpll *pll;
1042 	enum intel_dpll_id pll_id;
1043 	int clock = crtc_state->port_clock;
1044 
1045 	switch (clock / 2) {
1046 	case 81000:
1047 		pll_id = DPLL_ID_LCPLL_810;
1048 		break;
1049 	case 135000:
1050 		pll_id = DPLL_ID_LCPLL_1350;
1051 		break;
1052 	case 270000:
1053 		pll_id = DPLL_ID_LCPLL_2700;
1054 		break;
1055 	default:
1056 		MISSING_CASE(clock / 2);
1057 		return NULL;
1058 	}
1059 
1060 	pll = intel_get_shared_dpll_by_id(i915, pll_id);
1061 
1062 	if (!pll)
1063 		return NULL;
1064 
1065 	return pll;
1066 }
1067 
1068 static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1069 				  const struct intel_shared_dpll *pll,
1070 				  const struct intel_dpll_hw_state *pll_state)
1071 {
1072 	int link_clock = 0;
1073 
1074 	switch (pll->info->id) {
1075 	case DPLL_ID_LCPLL_810:
1076 		link_clock = 81000;
1077 		break;
1078 	case DPLL_ID_LCPLL_1350:
1079 		link_clock = 135000;
1080 		break;
1081 	case DPLL_ID_LCPLL_2700:
1082 		link_clock = 270000;
1083 		break;
1084 	default:
1085 		drm_WARN(&i915->drm, 1, "bad port clock sel\n");
1086 		break;
1087 	}
1088 
1089 	return link_clock * 2;
1090 }
1091 
1092 static int
1093 hsw_ddi_spll_compute_dpll(struct intel_atomic_state *state,
1094 			  struct intel_crtc *crtc)
1095 {
1096 	struct intel_crtc_state *crtc_state =
1097 		intel_atomic_get_new_crtc_state(state, crtc);
1098 
1099 	if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
1100 		return -EINVAL;
1101 
1102 	crtc_state->dpll_hw_state.spll =
1103 		SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
1104 
1105 	return 0;
1106 }
1107 
1108 static struct intel_shared_dpll *
1109 hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
1110 		      struct intel_crtc *crtc)
1111 {
1112 	struct intel_crtc_state *crtc_state =
1113 		intel_atomic_get_new_crtc_state(state, crtc);
1114 
1115 	return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
1116 				      BIT(DPLL_ID_SPLL));
1117 }
1118 
1119 static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
1120 				 const struct intel_shared_dpll *pll,
1121 				 const struct intel_dpll_hw_state *pll_state)
1122 {
1123 	int link_clock = 0;
1124 
1125 	switch (pll_state->spll & SPLL_FREQ_MASK) {
1126 	case SPLL_FREQ_810MHz:
1127 		link_clock = 81000;
1128 		break;
1129 	case SPLL_FREQ_1350MHz:
1130 		link_clock = 135000;
1131 		break;
1132 	case SPLL_FREQ_2700MHz:
1133 		link_clock = 270000;
1134 		break;
1135 	default:
1136 		drm_WARN(&i915->drm, 1, "bad spll freq\n");
1137 		break;
1138 	}
1139 
1140 	return link_clock * 2;
1141 }
1142 
1143 static int hsw_compute_dpll(struct intel_atomic_state *state,
1144 			    struct intel_crtc *crtc,
1145 			    struct intel_encoder *encoder)
1146 {
1147 	struct intel_crtc_state *crtc_state =
1148 		intel_atomic_get_new_crtc_state(state, crtc);
1149 
1150 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1151 		return hsw_ddi_wrpll_compute_dpll(state, crtc);
1152 	else if (intel_crtc_has_dp_encoder(crtc_state))
1153 		return hsw_ddi_lcpll_compute_dpll(crtc_state);
1154 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1155 		return hsw_ddi_spll_compute_dpll(state, crtc);
1156 	else
1157 		return -EINVAL;
1158 }
1159 
1160 static int hsw_get_dpll(struct intel_atomic_state *state,
1161 			struct intel_crtc *crtc,
1162 			struct intel_encoder *encoder)
1163 {
1164 	struct intel_crtc_state *crtc_state =
1165 		intel_atomic_get_new_crtc_state(state, crtc);
1166 	struct intel_shared_dpll *pll = NULL;
1167 
1168 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1169 		pll = hsw_ddi_wrpll_get_dpll(state, crtc);
1170 	else if (intel_crtc_has_dp_encoder(crtc_state))
1171 		pll = hsw_ddi_lcpll_get_dpll(crtc_state);
1172 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1173 		pll = hsw_ddi_spll_get_dpll(state, crtc);
1174 
1175 	if (!pll)
1176 		return -EINVAL;
1177 
1178 	intel_reference_shared_dpll(state, crtc,
1179 				    pll, &crtc_state->dpll_hw_state);
1180 
1181 	crtc_state->shared_dpll = pll;
1182 
1183 	return 0;
1184 }
1185 
1186 static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
1187 {
1188 	i915->display.dpll.ref_clks.ssc = 135000;
1189 	/* Non-SSC is only used on non-ULT HSW. */
1190 	if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
1191 		i915->display.dpll.ref_clks.nssc = 24000;
1192 	else
1193 		i915->display.dpll.ref_clks.nssc = 135000;
1194 }
1195 
1196 static void hsw_dump_hw_state(struct drm_i915_private *i915,
1197 			      const struct intel_dpll_hw_state *hw_state)
1198 {
1199 	drm_dbg_kms(&i915->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
1200 		    hw_state->wrpll, hw_state->spll);
1201 }
1202 
1203 static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
1204 	.enable = hsw_ddi_wrpll_enable,
1205 	.disable = hsw_ddi_wrpll_disable,
1206 	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
1207 	.get_freq = hsw_ddi_wrpll_get_freq,
1208 };
1209 
1210 static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
1211 	.enable = hsw_ddi_spll_enable,
1212 	.disable = hsw_ddi_spll_disable,
1213 	.get_hw_state = hsw_ddi_spll_get_hw_state,
1214 	.get_freq = hsw_ddi_spll_get_freq,
1215 };
1216 
1217 static void hsw_ddi_lcpll_enable(struct drm_i915_private *i915,
1218 				 struct intel_shared_dpll *pll)
1219 {
1220 }
1221 
1222 static void hsw_ddi_lcpll_disable(struct drm_i915_private *i915,
1223 				  struct intel_shared_dpll *pll)
1224 {
1225 }
1226 
1227 static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *i915,
1228 				       struct intel_shared_dpll *pll,
1229 				       struct intel_dpll_hw_state *hw_state)
1230 {
1231 	return true;
1232 }
1233 
1234 static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
1235 	.enable = hsw_ddi_lcpll_enable,
1236 	.disable = hsw_ddi_lcpll_disable,
1237 	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
1238 	.get_freq = hsw_ddi_lcpll_get_freq,
1239 };
1240 
1241 static const struct dpll_info hsw_plls[] = {
1242 	{ "WRPLL 1",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1,     0 },
1243 	{ "WRPLL 2",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2,     0 },
1244 	{ "SPLL",       &hsw_ddi_spll_funcs,  DPLL_ID_SPLL,       0 },
1245 	{ "LCPLL 810",  &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810,  INTEL_DPLL_ALWAYS_ON },
1246 	{ "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
1247 	{ "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
1248 	{ },
1249 };
1250 
1251 static const struct intel_dpll_mgr hsw_pll_mgr = {
1252 	.dpll_info = hsw_plls,
1253 	.compute_dplls = hsw_compute_dpll,
1254 	.get_dplls = hsw_get_dpll,
1255 	.put_dplls = intel_put_dpll,
1256 	.update_ref_clks = hsw_update_dpll_ref_clks,
1257 	.dump_hw_state = hsw_dump_hw_state,
1258 };
1259 
1260 struct skl_dpll_regs {
1261 	i915_reg_t ctl, cfgcr1, cfgcr2;
1262 };
1263 
1264 /* this array is indexed by the *shared* pll id */
1265 static const struct skl_dpll_regs skl_dpll_regs[4] = {
1266 	{
1267 		/* DPLL 0 */
1268 		.ctl = LCPLL1_CTL,
1269 		/* DPLL 0 doesn't support HDMI mode */
1270 	},
1271 	{
1272 		/* DPLL 1 */
1273 		.ctl = LCPLL2_CTL,
1274 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
1275 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
1276 	},
1277 	{
1278 		/* DPLL 2 */
1279 		.ctl = WRPLL_CTL(0),
1280 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
1281 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
1282 	},
1283 	{
1284 		/* DPLL 3 */
1285 		.ctl = WRPLL_CTL(1),
1286 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
1287 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
1288 	},
1289 };
1290 
1291 static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *i915,
1292 				    struct intel_shared_dpll *pll)
1293 {
1294 	const enum intel_dpll_id id = pll->info->id;
1295 
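	/*
	 * Each PLL owns a 6 bit wide field in DPLL_CTRL1; hw_state.ctrl1 is
	 * kept in the PLL 0 bit positions, hence the id * 6 shift here.
	 */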
1296 	intel_de_rmw(i915, DPLL_CTRL1,
1297 		     DPLL_CTRL1_HDMI_MODE(id) | DPLL_CTRL1_SSC(id) | DPLL_CTRL1_LINK_RATE_MASK(id),
1298 		     pll->state.hw_state.ctrl1 << (id * 6));
1299 	intel_de_posting_read(i915, DPLL_CTRL1);
1300 }
1301 
1302 static void skl_ddi_pll_enable(struct drm_i915_private *i915,
1303 			       struct intel_shared_dpll *pll)
1304 {
1305 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1306 	const enum intel_dpll_id id = pll->info->id;
1307 
1308 	skl_ddi_pll_write_ctrl1(i915, pll);
1309 
1310 	intel_de_write(i915, regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
1311 	intel_de_write(i915, regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
1312 	intel_de_posting_read(i915, regs[id].cfgcr1);
1313 	intel_de_posting_read(i915, regs[id].cfgcr2);
1314 
1315 	/* the enable bit is always bit 31 */
1316 	intel_de_rmw(i915, regs[id].ctl, 0, LCPLL_PLL_ENABLE);
1317 
1318 	if (intel_de_wait_for_set(i915, DPLL_STATUS, DPLL_LOCK(id), 5))
1319 		drm_err(&i915->drm, "DPLL %d not locked\n", id);
1320 }
1321 
1322 static void skl_ddi_dpll0_enable(struct drm_i915_private *i915,
1323 				 struct intel_shared_dpll *pll)
1324 {
1325 	skl_ddi_pll_write_ctrl1(i915, pll);
1326 }
1327 
1328 static void skl_ddi_pll_disable(struct drm_i915_private *i915,
1329 				struct intel_shared_dpll *pll)
1330 {
1331 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1332 	const enum intel_dpll_id id = pll->info->id;
1333 
1334 	/* the enable bit is always bit 31 */
1335 	intel_de_rmw(i915, regs[id].ctl, LCPLL_PLL_ENABLE, 0);
1336 	intel_de_posting_read(i915, regs[id].ctl);
1337 }
1338 
1339 static void skl_ddi_dpll0_disable(struct drm_i915_private *i915,
1340 				  struct intel_shared_dpll *pll)
1341 {
1342 }
1343 
1344 static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *i915,
1345 				     struct intel_shared_dpll *pll,
1346 				     struct intel_dpll_hw_state *hw_state)
1347 {
1348 	u32 val;
1349 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1350 	const enum intel_dpll_id id = pll->info->id;
1351 	intel_wakeref_t wakeref;
1352 	bool ret;
1353 
1354 	wakeref = intel_display_power_get_if_enabled(i915,
1355 						     POWER_DOMAIN_DISPLAY_CORE);
1356 	if (!wakeref)
1357 		return false;
1358 
1359 	ret = false;
1360 
1361 	val = intel_de_read(i915, regs[id].ctl);
1362 	if (!(val & LCPLL_PLL_ENABLE))
1363 		goto out;
1364 
1365 	val = intel_de_read(i915, DPLL_CTRL1);
1366 	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1367 
1368 	/* avoid reading back stale values if HDMI mode is not enabled */
1369 	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
1370 		hw_state->cfgcr1 = intel_de_read(i915, regs[id].cfgcr1);
1371 		hw_state->cfgcr2 = intel_de_read(i915, regs[id].cfgcr2);
1372 	}
1373 	ret = true;
1374 
1375 out:
1376 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1377 
1378 	return ret;
1379 }
1380 
1381 static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *i915,
1382 				       struct intel_shared_dpll *pll,
1383 				       struct intel_dpll_hw_state *hw_state)
1384 {
1385 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1386 	const enum intel_dpll_id id = pll->info->id;
1387 	intel_wakeref_t wakeref;
1388 	u32 val;
1389 	bool ret;
1390 
1391 	wakeref = intel_display_power_get_if_enabled(i915,
1392 						     POWER_DOMAIN_DISPLAY_CORE);
1393 	if (!wakeref)
1394 		return false;
1395 
1396 	ret = false;
1397 
1398 	/* DPLL0 is always enabled since it drives CDCLK */
1399 	val = intel_de_read(i915, regs[id].ctl);
1400 	if (drm_WARN_ON(&i915->drm, !(val & LCPLL_PLL_ENABLE)))
1401 		goto out;
1402 
1403 	val = intel_de_read(i915, DPLL_CTRL1);
1404 	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1405 
1406 	ret = true;
1407 
1408 out:
1409 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1410 
1411 	return ret;
1412 }
1413 
1414 struct skl_wrpll_context {
1415 	u64 min_deviation;		/* current minimal deviation */
1416 	u64 central_freq;		/* chosen central freq */
1417 	u64 dco_freq;			/* chosen dco freq */
1418 	unsigned int p;			/* chosen divider */
1419 };
1420 
1421 /* DCO freq must be within +1%/-6%  of the DCO central freq */
1422 #define SKL_DCO_MAX_PDEVIATION	100
1423 #define SKL_DCO_MAX_NDEVIATION	600
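/*
 * The deviation computed in skl_wrpll_try_divider() is in units of 0.01%,
 * i.e. 100 == 1% and 600 == 6%.
 */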
1424 
1425 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1426 				  u64 central_freq,
1427 				  u64 dco_freq,
1428 				  unsigned int divider)
1429 {
1430 	u64 deviation;
1431 
1432 	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1433 			      central_freq);
1434 
1435 	/* positive deviation */
1436 	if (dco_freq >= central_freq) {
1437 		if (deviation < SKL_DCO_MAX_PDEVIATION &&
1438 		    deviation < ctx->min_deviation) {
1439 			ctx->min_deviation = deviation;
1440 			ctx->central_freq = central_freq;
1441 			ctx->dco_freq = dco_freq;
1442 			ctx->p = divider;
1443 		}
1444 	/* negative deviation */
1445 	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1446 		   deviation < ctx->min_deviation) {
1447 		ctx->min_deviation = deviation;
1448 		ctx->central_freq = central_freq;
1449 		ctx->dco_freq = dco_freq;
1450 		ctx->p = divider;
1451 	}
1452 }
1453 
1454 static void skl_wrpll_get_multipliers(unsigned int p,
1455 				      unsigned int *p0 /* out */,
1456 				      unsigned int *p1 /* out */,
1457 				      unsigned int *p2 /* out */)
1458 {
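	/*
	 * Split the overall divider p into p0 (pdiv), p1 (qdiv) and p2 (kdiv)
	 * such that p = p0 * p1 * p2, e.g. p = 20 -> 2 * 5 * 2.
	 */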
1459 	/* even dividers */
1460 	if (p % 2 == 0) {
1461 		unsigned int half = p / 2;
1462 
1463 		if (half == 1 || half == 2 || half == 3 || half == 5) {
1464 			*p0 = 2;
1465 			*p1 = 1;
1466 			*p2 = half;
1467 		} else if (half % 2 == 0) {
1468 			*p0 = 2;
1469 			*p1 = half / 2;
1470 			*p2 = 2;
1471 		} else if (half % 3 == 0) {
1472 			*p0 = 3;
1473 			*p1 = half / 3;
1474 			*p2 = 2;
1475 		} else if (half % 7 == 0) {
1476 			*p0 = 7;
1477 			*p1 = half / 7;
1478 			*p2 = 2;
1479 		}
1480 	} else if (p == 3 || p == 9) {  /* 3, 5, 7, 9, 15, 21, 35 */
1481 		*p0 = 3;
1482 		*p1 = 1;
1483 		*p2 = p / 3;
1484 	} else if (p == 5 || p == 7) {
1485 		*p0 = p;
1486 		*p1 = 1;
1487 		*p2 = 1;
1488 	} else if (p == 15) {
1489 		*p0 = 3;
1490 		*p1 = 1;
1491 		*p2 = 5;
1492 	} else if (p == 21) {
1493 		*p0 = 7;
1494 		*p1 = 1;
1495 		*p2 = 3;
1496 	} else if (p == 35) {
1497 		*p0 = 7;
1498 		*p1 = 1;
1499 		*p2 = 5;
1500 	}
1501 }
1502 
1503 struct skl_wrpll_params {
1504 	u32 dco_fraction;
1505 	u32 dco_integer;
1506 	u32 qdiv_ratio;
1507 	u32 qdiv_mode;
1508 	u32 kdiv;
1509 	u32 pdiv;
1510 	u32 central_freq;
1511 };
1512 
1513 static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
1514 				      u64 afe_clock,
1515 				      int ref_clock,
1516 				      u64 central_freq,
1517 				      u32 p0, u32 p1, u32 p2)
1518 {
1519 	u64 dco_freq;
1520 
1521 	switch (central_freq) {
1522 	case 9600000000ULL:
1523 		params->central_freq = 0;
1524 		break;
1525 	case 9000000000ULL:
1526 		params->central_freq = 1;
1527 		break;
1528 	case 8400000000ULL:
1529 		params->central_freq = 3;
1530 	}
1531 
1532 	switch (p0) {
1533 	case 1:
1534 		params->pdiv = 0;
1535 		break;
1536 	case 2:
1537 		params->pdiv = 1;
1538 		break;
1539 	case 3:
1540 		params->pdiv = 2;
1541 		break;
1542 	case 7:
1543 		params->pdiv = 4;
1544 		break;
1545 	default:
1546 		WARN(1, "Incorrect PDiv\n");
1547 	}
1548 
1549 	switch (p2) {
1550 	case 5:
1551 		params->kdiv = 0;
1552 		break;
1553 	case 2:
1554 		params->kdiv = 1;
1555 		break;
1556 	case 3:
1557 		params->kdiv = 2;
1558 		break;
1559 	case 1:
1560 		params->kdiv = 3;
1561 		break;
1562 	default:
1563 		WARN(1, "Incorrect KDiv\n");
1564 	}
1565 
1566 	params->qdiv_ratio = p1;
1567 	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
1568 
1569 	dco_freq = p0 * p1 * p2 * afe_clock;
1570 
1571 	/*
1572 	 * Intermediate values are in Hz.
1573 	 * Divide by MHz to match bspec
1574 	 */
1575 	params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
1576 	params->dco_fraction =
1577 		div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
1578 			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
1579 }
1580 
1581 static int
1582 skl_ddi_calculate_wrpll(int clock /* in Hz */,
1583 			int ref_clock,
1584 			struct skl_wrpll_params *wrpll_params)
1585 {
1586 	static const u64 dco_central_freq[3] = { 8400000000ULL,
1587 						 9000000000ULL,
1588 						 9600000000ULL };
1589 	static const u8 even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
1590 					    24, 28, 30, 32, 36, 40, 42, 44,
1591 					    48, 52, 54, 56, 60, 64, 66, 68,
1592 					    70, 72, 76, 78, 80, 84, 88, 90,
1593 					    92, 96, 98 };
1594 	static const u8 odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
1595 	static const struct {
1596 		const u8 *list;
1597 		int n_dividers;
1598 	} dividers[] = {
1599 		{ even_dividers, ARRAY_SIZE(even_dividers) },
1600 		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
1601 	};
1602 	struct skl_wrpll_context ctx = {
1603 		.min_deviation = U64_MAX,
1604 	};
1605 	unsigned int dco, d, i;
1606 	unsigned int p0, p1, p2;
1607 	u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
1608 
1609 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
1610 		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
1611 			for (i = 0; i < dividers[d].n_dividers; i++) {
1612 				unsigned int p = dividers[d].list[i];
1613 				u64 dco_freq = p * afe_clock;
1614 
1615 				skl_wrpll_try_divider(&ctx,
1616 						      dco_central_freq[dco],
1617 						      dco_freq,
1618 						      p);
1619 				/*
1620 				 * Skip the remaining dividers if we're sure to
1621 				 * have found the definitive divider, we can't
1622 				 * improve a 0 deviation.
1623 				 */
1624 				if (ctx.min_deviation == 0)
1625 					goto skip_remaining_dividers;
1626 			}
1627 		}
1628 
1629 skip_remaining_dividers:
1630 		/*
1631 		 * If a solution is found with an even divider, prefer
1632 		 * this one.
1633 		 */
1634 		if (d == 0 && ctx.p)
1635 			break;
1636 	}
1637 
1638 	if (!ctx.p)
1639 		return -EINVAL;
1640 
1641 	/*
1642 	 * gcc incorrectly analyses that these can be used without being
1643 	 * initialized. To be fair, it's hard to guess.
1644 	 */
1645 	p0 = p1 = p2 = 0;
1646 	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
1647 	skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
1648 				  ctx.central_freq, p0, p1, p2);
1649 
1650 	return 0;
1651 }
1652 
1653 static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
1654 				  const struct intel_shared_dpll *pll,
1655 				  const struct intel_dpll_hw_state *pll_state)
1656 {
1657 	int ref_clock = i915->display.dpll.ref_clks.nssc;
1658 	u32 p0, p1, p2, dco_freq;
1659 
1660 	p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
1661 	p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;
1662 
1663 	if (pll_state->cfgcr2 &  DPLL_CFGCR2_QDIV_MODE(1))
1664 		p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
1665 	else
1666 		p1 = 1;
1667 
1668 
1669 	switch (p0) {
1670 	case DPLL_CFGCR2_PDIV_1:
1671 		p0 = 1;
1672 		break;
1673 	case DPLL_CFGCR2_PDIV_2:
1674 		p0 = 2;
1675 		break;
1676 	case DPLL_CFGCR2_PDIV_3:
1677 		p0 = 3;
1678 		break;
1679 	case DPLL_CFGCR2_PDIV_7_INVALID:
1680 		/*
1681 		 * Incorrect ASUS-Z170M BIOS setting; the HW seems to ignore bit#0,
1682 		 * so handle it the same way as PDIV_7.
1683 		 */
1684 		drm_dbg_kms(&i915->drm, "Invalid WRPLL PDIV divider value, fixing it.\n");
1685 		fallthrough;
1686 	case DPLL_CFGCR2_PDIV_7:
1687 		p0 = 7;
1688 		break;
1689 	default:
1690 		MISSING_CASE(p0);
1691 		return 0;
1692 	}
1693 
1694 	switch (p2) {
1695 	case DPLL_CFGCR2_KDIV_5:
1696 		p2 = 5;
1697 		break;
1698 	case DPLL_CFGCR2_KDIV_2:
1699 		p2 = 2;
1700 		break;
1701 	case DPLL_CFGCR2_KDIV_3:
1702 		p2 = 3;
1703 		break;
1704 	case DPLL_CFGCR2_KDIV_1:
1705 		p2 = 1;
1706 		break;
1707 	default:
1708 		MISSING_CASE(p2);
1709 		return 0;
1710 	}
1711 
1712 	dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
1713 		   ref_clock;
1714 
1715 	dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
1716 		    ref_clock / 0x8000;
1717 
1718 	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
1719 		return 0;
1720 
1721 	return dco_freq / (p0 * p1 * p2 * 5);
1722 }
1723 
1724 static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
1725 {
1726 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1727 	struct skl_wrpll_params wrpll_params = {};
1728 	u32 ctrl1, cfgcr1, cfgcr2;
1729 	int ret;
1730 
1731 	/*
1732 	 * See comment in intel_dpll_hw_state to understand why we always use 0
1733 	 * as the DPLL id in this function.
1734 	 */
1735 	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1736 
1737 	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
1738 
1739 	ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
1740 				      i915->display.dpll.ref_clks.nssc, &wrpll_params);
1741 	if (ret)
1742 		return ret;
1743 
1744 	cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
1745 		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
1746 		wrpll_params.dco_integer;
1747 
1748 	cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
1749 		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
1750 		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
1751 		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
1752 		wrpll_params.central_freq;
1753 
1754 	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1755 	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
1756 	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
1757 
1758 	crtc_state->port_clock = skl_ddi_wrpll_get_freq(i915, NULL,
1759 							&crtc_state->dpll_hw_state);
1760 
1761 	return 0;
1762 }
1763 
1764 static int
1765 skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1766 {
1767 	u32 ctrl1;
1768 
1769 	/*
1770 	 * See comment in intel_dpll_hw_state to understand why we always use 0
1771 	 * as the DPLL id in this function.
1772 	 */
1773 	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1774 	switch (crtc_state->port_clock / 2) {
1775 	case 81000:
1776 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
1777 		break;
1778 	case 135000:
1779 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
1780 		break;
1781 	case 270000:
1782 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
1783 		break;
1784 		/* eDP 1.4 rates */
1785 	case 162000:
1786 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
1787 		break;
1788 	case 108000:
1789 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
1790 		break;
1791 	case 216000:
1792 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
1793 		break;
1794 	}
1795 
1796 	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1797 
1798 	return 0;
1799 }
1800 
1801 static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1802 				  const struct intel_shared_dpll *pll,
1803 				  const struct intel_dpll_hw_state *pll_state)
1804 {
1805 	int link_clock = 0;
1806 
1807 	switch ((pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
1808 		DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
1809 	case DPLL_CTRL1_LINK_RATE_810:
1810 		link_clock = 81000;
1811 		break;
1812 	case DPLL_CTRL1_LINK_RATE_1080:
1813 		link_clock = 108000;
1814 		break;
1815 	case DPLL_CTRL1_LINK_RATE_1350:
1816 		link_clock = 135000;
1817 		break;
1818 	case DPLL_CTRL1_LINK_RATE_1620:
1819 		link_clock = 162000;
1820 		break;
1821 	case DPLL_CTRL1_LINK_RATE_2160:
1822 		link_clock = 216000;
1823 		break;
1824 	case DPLL_CTRL1_LINK_RATE_2700:
1825 		link_clock = 270000;
1826 		break;
1827 	default:
1828 		drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
1829 		break;
1830 	}
1831 
1832 	return link_clock * 2;
1833 }
1834 
1835 static int skl_compute_dpll(struct intel_atomic_state *state,
1836 			    struct intel_crtc *crtc,
1837 			    struct intel_encoder *encoder)
1838 {
1839 	struct intel_crtc_state *crtc_state =
1840 		intel_atomic_get_new_crtc_state(state, crtc);
1841 
1842 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1843 		return skl_ddi_hdmi_pll_dividers(crtc_state);
1844 	else if (intel_crtc_has_dp_encoder(crtc_state))
1845 		return skl_ddi_dp_set_dpll_hw_state(crtc_state);
1846 	else
1847 		return -EINVAL;
1848 }
1849 
1850 static int skl_get_dpll(struct intel_atomic_state *state,
1851 			struct intel_crtc *crtc,
1852 			struct intel_encoder *encoder)
1853 {
1854 	struct intel_crtc_state *crtc_state =
1855 		intel_atomic_get_new_crtc_state(state, crtc);
1856 	struct intel_shared_dpll *pll;
1857 
1858 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1859 		pll = intel_find_shared_dpll(state, crtc,
1860 					     &crtc_state->dpll_hw_state,
1861 					     BIT(DPLL_ID_SKL_DPLL0));
1862 	else
1863 		pll = intel_find_shared_dpll(state, crtc,
1864 					     &crtc_state->dpll_hw_state,
1865 					     BIT(DPLL_ID_SKL_DPLL3) |
1866 					     BIT(DPLL_ID_SKL_DPLL2) |
1867 					     BIT(DPLL_ID_SKL_DPLL1));
1868 	if (!pll)
1869 		return -EINVAL;
1870 
1871 	intel_reference_shared_dpll(state, crtc,
1872 				    pll, &crtc_state->dpll_hw_state);
1873 
1874 	crtc_state->shared_dpll = pll;
1875 
1876 	return 0;
1877 }
1878 
1879 static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
1880 				const struct intel_shared_dpll *pll,
1881 				const struct intel_dpll_hw_state *pll_state)
1882 {
1883 	/*
1884 	 * The ctrl1 register is already shifted for each pll; just use 0 to get
1885 	 * the internal shift for each field.
1886 	 */
1887 	if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
1888 		return skl_ddi_wrpll_get_freq(i915, pll, pll_state);
1889 	else
1890 		return skl_ddi_lcpll_get_freq(i915, pll, pll_state);
1891 }
1892 
1893 static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
1894 {
1895 	/* No SSC ref */
1896 	i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
1897 }
1898 
1899 static void skl_dump_hw_state(struct drm_i915_private *i915,
1900 			      const struct intel_dpll_hw_state *hw_state)
1901 {
1902 	drm_dbg_kms(&i915->drm, "dpll_hw_state: "
1903 		      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
1904 		      hw_state->ctrl1,
1905 		      hw_state->cfgcr1,
1906 		      hw_state->cfgcr2);
1907 }
1908 
1909 static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
1910 	.enable = skl_ddi_pll_enable,
1911 	.disable = skl_ddi_pll_disable,
1912 	.get_hw_state = skl_ddi_pll_get_hw_state,
1913 	.get_freq = skl_ddi_pll_get_freq,
1914 };
1915 
1916 static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
1917 	.enable = skl_ddi_dpll0_enable,
1918 	.disable = skl_ddi_dpll0_disable,
1919 	.get_hw_state = skl_ddi_dpll0_get_hw_state,
1920 	.get_freq = skl_ddi_pll_get_freq,
1921 };
1922 
1923 static const struct dpll_info skl_plls[] = {
1924 	{ "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
1925 	{ "DPLL 1", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL1, 0 },
1926 	{ "DPLL 2", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL2, 0 },
1927 	{ "DPLL 3", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL3, 0 },
1928 	{ },
1929 };
1930 
1931 static const struct intel_dpll_mgr skl_pll_mgr = {
1932 	.dpll_info = skl_plls,
1933 	.compute_dplls = skl_compute_dpll,
1934 	.get_dplls = skl_get_dpll,
1935 	.put_dplls = intel_put_dpll,
1936 	.update_ref_clks = skl_update_dpll_ref_clks,
1937 	.dump_hw_state = skl_dump_hw_state,
1938 };
1939 
1940 static void bxt_ddi_pll_enable(struct drm_i915_private *i915,
1941 			       struct intel_shared_dpll *pll)
1942 {
1943 	u32 temp;
1944 	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
1945 	enum dpio_phy phy;
1946 	enum dpio_channel ch;
1947 
1948 	bxt_port_to_phy_channel(i915, port, &phy, &ch);
1949 
1950 	/* Non-SSC reference */
1951 	intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_REF_SEL);
1952 
1953 	if (IS_GEMINILAKE(i915)) {
1954 		intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port),
1955 			     0, PORT_PLL_POWER_ENABLE);
1956 
1957 		if (wait_for_us((intel_de_read(i915, BXT_PORT_PLL_ENABLE(port)) &
1958 				 PORT_PLL_POWER_STATE), 200))
1959 			drm_err(&i915->drm,
1960 				"Power state not set for PLL:%d\n", port);
1961 	}
1962 
1963 	/* Disable 10 bit clock */
1964 	intel_de_rmw(i915, BXT_PORT_PLL_EBB_4(phy, ch),
1965 		     PORT_PLL_10BIT_CLK_ENABLE, 0);
1966 
1967 	/* Write P1 & P2 */
1968 	intel_de_rmw(i915, BXT_PORT_PLL_EBB_0(phy, ch),
1969 		     PORT_PLL_P1_MASK | PORT_PLL_P2_MASK, pll->state.hw_state.ebb0);
1970 
1971 	/* Write M2 integer */
1972 	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 0),
1973 		     PORT_PLL_M2_INT_MASK, pll->state.hw_state.pll0);
1974 
1975 	/* Write N */
1976 	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 1),
1977 		     PORT_PLL_N_MASK, pll->state.hw_state.pll1);
1978 
1979 	/* Write M2 fraction */
1980 	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 2),
1981 		     PORT_PLL_M2_FRAC_MASK, pll->state.hw_state.pll2);
1982 
1983 	/* Write M2 fraction enable */
1984 	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 3),
1985 		     PORT_PLL_M2_FRAC_ENABLE, pll->state.hw_state.pll3);
1986 
1987 	/* Write coeff */
1988 	temp = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 6));
1989 	temp &= ~PORT_PLL_PROP_COEFF_MASK;
1990 	temp &= ~PORT_PLL_INT_COEFF_MASK;
1991 	temp &= ~PORT_PLL_GAIN_CTL_MASK;
1992 	temp |= pll->state.hw_state.pll6;
1993 	intel_de_write(i915, BXT_PORT_PLL(phy, ch, 6), temp);
1994 
1995 	/* Write calibration val */
1996 	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 8),
1997 		     PORT_PLL_TARGET_CNT_MASK, pll->state.hw_state.pll8);
1998 
1999 	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 9),
2000 		     PORT_PLL_LOCK_THRESHOLD_MASK, pll->state.hw_state.pll9);
2001 
2002 	temp = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 10));
2003 	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
2004 	temp &= ~PORT_PLL_DCO_AMP_MASK;
2005 	temp |= pll->state.hw_state.pll10;
2006 	intel_de_write(i915, BXT_PORT_PLL(phy, ch, 10), temp);
2007 
2008 	/* Recalibrate with new settings */
2009 	temp = intel_de_read(i915, BXT_PORT_PLL_EBB_4(phy, ch));
2010 	temp |= PORT_PLL_RECALIBRATE;
2011 	intel_de_write(i915, BXT_PORT_PLL_EBB_4(phy, ch), temp);
2012 	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
2013 	temp |= pll->state.hw_state.ebb4;
2014 	intel_de_write(i915, BXT_PORT_PLL_EBB_4(phy, ch), temp);
2015 
2016 	/* Enable PLL */
2017 	intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_ENABLE);
2018 	intel_de_posting_read(i915, BXT_PORT_PLL_ENABLE(port));
2019 
2020 	if (wait_for_us((intel_de_read(i915, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
2021 			200))
2022 		drm_err(&i915->drm, "PLL %d not locked\n", port);
2023 
2024 	if (IS_GEMINILAKE(i915)) {
2025 		temp = intel_de_read(i915, BXT_PORT_TX_DW5_LN0(phy, ch));
2026 		temp |= DCC_DELAY_RANGE_2;
2027 		intel_de_write(i915, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
2028 	}
2029 
2030 	/*
2031 	 * While we write to the group register to program all lanes at once, we
2032 	 * can only read back individual lane registers; we pick lanes 0/1 for that.
2033 	 */
2034 	temp = intel_de_read(i915, BXT_PORT_PCS_DW12_LN01(phy, ch));
2035 	temp &= ~LANE_STAGGER_MASK;
2036 	temp &= ~LANESTAGGER_STRAP_OVRD;
2037 	temp |= pll->state.hw_state.pcsdw12;
2038 	intel_de_write(i915, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
2039 }
2040 
2041 static void bxt_ddi_pll_disable(struct drm_i915_private *i915,
2042 				struct intel_shared_dpll *pll)
2043 {
2044 	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
2045 
2046 	intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), PORT_PLL_ENABLE, 0);
2047 	intel_de_posting_read(i915, BXT_PORT_PLL_ENABLE(port));
2048 
2049 	if (IS_GEMINILAKE(i915)) {
2050 		intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port),
2051 			     PORT_PLL_POWER_ENABLE, 0);
2052 
2053 		if (wait_for_us(!(intel_de_read(i915, BXT_PORT_PLL_ENABLE(port)) &
2054 				  PORT_PLL_POWER_STATE), 200))
2055 			drm_err(&i915->drm,
2056 				"Power state not reset for PLL:%d\n", port);
2057 	}
2058 }
2059 
2060 static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *i915,
2061 				     struct intel_shared_dpll *pll,
2062 				     struct intel_dpll_hw_state *hw_state)
2063 {
2064 	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
2065 	intel_wakeref_t wakeref;
2066 	enum dpio_phy phy;
2067 	enum dpio_channel ch;
2068 	u32 val;
2069 	bool ret;
2070 
2071 	bxt_port_to_phy_channel(i915, port, &phy, &ch);
2072 
2073 	wakeref = intel_display_power_get_if_enabled(i915,
2074 						     POWER_DOMAIN_DISPLAY_CORE);
2075 	if (!wakeref)
2076 		return false;
2077 
2078 	ret = false;
2079 
2080 	val = intel_de_read(i915, BXT_PORT_PLL_ENABLE(port));
2081 	if (!(val & PORT_PLL_ENABLE))
2082 		goto out;
2083 
2084 	hw_state->ebb0 = intel_de_read(i915, BXT_PORT_PLL_EBB_0(phy, ch));
2085 	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
2086 
2087 	hw_state->ebb4 = intel_de_read(i915, BXT_PORT_PLL_EBB_4(phy, ch));
2088 	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
2089 
2090 	hw_state->pll0 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 0));
2091 	hw_state->pll0 &= PORT_PLL_M2_INT_MASK;
2092 
2093 	hw_state->pll1 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 1));
2094 	hw_state->pll1 &= PORT_PLL_N_MASK;
2095 
2096 	hw_state->pll2 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 2));
2097 	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
2098 
2099 	hw_state->pll3 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 3));
2100 	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
2101 
2102 	hw_state->pll6 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 6));
2103 	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
2104 			  PORT_PLL_INT_COEFF_MASK |
2105 			  PORT_PLL_GAIN_CTL_MASK;
2106 
2107 	hw_state->pll8 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 8));
2108 	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
2109 
2110 	hw_state->pll9 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 9));
2111 	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
2112 
2113 	hw_state->pll10 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 10));
2114 	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
2115 			   PORT_PLL_DCO_AMP_MASK;
2116 
2117 	/*
2118 	 * While we write to the group register to program all lanes at once, we
2119 	 * can read back only individual lane registers. We configure all lanes the
2120 	 * same way, so here we just read out lanes 0/1 and note if lanes 2/3 differ.
2121 	 */
2122 	hw_state->pcsdw12 = intel_de_read(i915,
2123 					  BXT_PORT_PCS_DW12_LN01(phy, ch));
2124 	if (intel_de_read(i915, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
2125 		drm_dbg(&i915->drm,
2126 			"lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
2127 			hw_state->pcsdw12,
2128 			intel_de_read(i915,
2129 				      BXT_PORT_PCS_DW12_LN23(phy, ch)));
2130 	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
2131 
2132 	ret = true;
2133 
2134 out:
2135 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
2136 
2137 	return ret;
2138 }
2139 
2140 /* pre-calculated values for DP link rates */
2141 static const struct dpll bxt_dp_clk_val[] = {
2142 	/* m2 is .22 binary fixed point */
2143 	{ .dot = 162000, .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2144 	{ .dot = 270000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
2145 	{ .dot = 540000, .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
2146 	{ .dot = 216000, .p1 = 3, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2147 	{ .dot = 243000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6133333 /* 24.3 */ },
2148 	{ .dot = 324000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2149 	{ .dot = 432000, .p1 = 3, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2150 };
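/*
 * Worked example for the table above (own arithmetic, not from bspec):
 * the 162000 entry has m2 = 0x819999a, i.e. 0x819999a / 2^22 = 32.4.
 * With the 100 MHz non-SSC reference, chv_calc_dpll_params() then yields
 * vco = 100000 * m1 * m2 = 100000 * 2 * 32.4 = 6480000 kHz and
 * dot = vco / (p1 * p2 * 5) = 6480000 / (4 * 2 * 5) = 162000 kHz.
 */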
2151 
2152 static int
2153 bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
2154 			  struct dpll *clk_div)
2155 {
2156 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2157 
2158 	/* Calculate HDMI div */
2159 	/*
2160 	 * FIXME: tie the following calculation into
2161 	 * i9xx_crtc_compute_clock
2162 	 */
2163 	if (!bxt_find_best_dpll(crtc_state, clk_div))
2164 		return -EINVAL;
2165 
2166 	drm_WARN_ON(&i915->drm, clk_div->m1 != 2);
2167 
2168 	return 0;
2169 }
2170 
2171 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
2172 				    struct dpll *clk_div)
2173 {
2174 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2175 	int i;
2176 
2177 	*clk_div = bxt_dp_clk_val[0];
2178 	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
2179 		if (crtc_state->port_clock == bxt_dp_clk_val[i].dot) {
2180 			*clk_div = bxt_dp_clk_val[i];
2181 			break;
2182 		}
2183 	}
2184 
2185 	chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, clk_div);
2186 
2187 	drm_WARN_ON(&i915->drm, clk_div->vco == 0 ||
2188 		    clk_div->dot != crtc_state->port_clock);
2189 }
2190 
2191 static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
2192 				     const struct dpll *clk_div)
2193 {
2194 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2195 	struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
2196 	int clock = crtc_state->port_clock;
2197 	int vco = clk_div->vco;
2198 	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
2199 	u32 lanestagger;
2200 
2201 	if (vco >= 6200000 && vco <= 6700000) {
2202 		prop_coef = 4;
2203 		int_coef = 9;
2204 		gain_ctl = 3;
2205 		targ_cnt = 8;
2206 	} else if ((vco > 5400000 && vco < 6200000) ||
2207 			(vco >= 4800000 && vco < 5400000)) {
2208 		prop_coef = 5;
2209 		int_coef = 11;
2210 		gain_ctl = 3;
2211 		targ_cnt = 9;
2212 	} else if (vco == 5400000) {
2213 		prop_coef = 3;
2214 		int_coef = 8;
2215 		gain_ctl = 1;
2216 		targ_cnt = 9;
2217 	} else {
2218 		drm_err(&i915->drm, "Invalid VCO\n");
2219 		return -EINVAL;
2220 	}
2221 
2222 	if (clock > 270000)
2223 		lanestagger = 0x18;
2224 	else if (clock > 135000)
2225 		lanestagger = 0x0d;
2226 	else if (clock > 67000)
2227 		lanestagger = 0x07;
2228 	else if (clock > 33000)
2229 		lanestagger = 0x04;
2230 	else
2231 		lanestagger = 0x02;
2232 
2233 	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
2234 	dpll_hw_state->pll0 = PORT_PLL_M2_INT(clk_div->m2 >> 22);
2235 	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
2236 	dpll_hw_state->pll2 = PORT_PLL_M2_FRAC(clk_div->m2 & 0x3fffff);
2237 
2238 	if (clk_div->m2 & 0x3fffff)
2239 		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
2240 
2241 	dpll_hw_state->pll6 = PORT_PLL_PROP_COEFF(prop_coef) |
2242 		PORT_PLL_INT_COEFF(int_coef) |
2243 		PORT_PLL_GAIN_CTL(gain_ctl);
2244 
2245 	dpll_hw_state->pll8 = PORT_PLL_TARGET_CNT(targ_cnt);
2246 
2247 	dpll_hw_state->pll9 = PORT_PLL_LOCK_THRESHOLD(5);
2248 
2249 	dpll_hw_state->pll10 = PORT_PLL_DCO_AMP(15) |
2250 		PORT_PLL_DCO_AMP_OVR_EN_H;
2251 
2252 	dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
2253 
2254 	dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
2255 
2256 	return 0;
2257 }
2258 
2259 static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
2260 				const struct intel_shared_dpll *pll,
2261 				const struct intel_dpll_hw_state *pll_state)
2262 {
2263 	struct dpll clock;
2264 
2265 	clock.m1 = 2;
2266 	clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, pll_state->pll0) << 22;
2267 	if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
2268 		clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK, pll_state->pll2);
2269 	clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, pll_state->pll1);
2270 	clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, pll_state->ebb0);
2271 	clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, pll_state->ebb0);
2272 
2273 	return chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, &clock);
2274 }
2275 
2276 static int
2277 bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2278 {
2279 	struct dpll clk_div = {};
2280 
2281 	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
2282 
2283 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2284 }
2285 
2286 static int
2287 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2288 {
2289 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2290 	struct dpll clk_div = {};
2291 	int ret;
2292 
2293 	bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
2294 
2295 	ret = bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2296 	if (ret)
2297 		return ret;
2298 
2299 	crtc_state->port_clock = bxt_ddi_pll_get_freq(i915, NULL,
2300 						      &crtc_state->dpll_hw_state);
2301 
2302 	return 0;
2303 }
2304 
2305 static int bxt_compute_dpll(struct intel_atomic_state *state,
2306 			    struct intel_crtc *crtc,
2307 			    struct intel_encoder *encoder)
2308 {
2309 	struct intel_crtc_state *crtc_state =
2310 		intel_atomic_get_new_crtc_state(state, crtc);
2311 
2312 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
2313 		return bxt_ddi_hdmi_set_dpll_hw_state(crtc_state);
2314 	else if (intel_crtc_has_dp_encoder(crtc_state))
2315 		return bxt_ddi_dp_set_dpll_hw_state(crtc_state);
2316 	else
2317 		return -EINVAL;
2318 }
2319 
2320 static int bxt_get_dpll(struct intel_atomic_state *state,
2321 			struct intel_crtc *crtc,
2322 			struct intel_encoder *encoder)
2323 {
2324 	struct intel_crtc_state *crtc_state =
2325 		intel_atomic_get_new_crtc_state(state, crtc);
2326 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2327 	struct intel_shared_dpll *pll;
2328 	enum intel_dpll_id id;
2329 
2330 	/* 1:1 mapping between ports and PLLs */
2331 	id = (enum intel_dpll_id) encoder->port;
2332 	pll = intel_get_shared_dpll_by_id(i915, id);
2333 
2334 	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
2335 		    crtc->base.base.id, crtc->base.name, pll->info->name);
2336 
2337 	intel_reference_shared_dpll(state, crtc,
2338 				    pll, &crtc_state->dpll_hw_state);
2339 
2340 	crtc_state->shared_dpll = pll;
2341 
2342 	return 0;
2343 }
2344 
2345 static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
2346 {
2347 	i915->display.dpll.ref_clks.ssc = 100000;
2348 	i915->display.dpll.ref_clks.nssc = 100000;
2349 	/* DSI non-SSC ref 19.2 MHz */
2350 }
2351 
2352 static void bxt_dump_hw_state(struct drm_i915_private *i915,
2353 			      const struct intel_dpll_hw_state *hw_state)
2354 {
2355 	drm_dbg_kms(&i915->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x, "
2356 		    "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
2357 		    "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
2358 		    hw_state->ebb0,
2359 		    hw_state->ebb4,
2360 		    hw_state->pll0,
2361 		    hw_state->pll1,
2362 		    hw_state->pll2,
2363 		    hw_state->pll3,
2364 		    hw_state->pll6,
2365 		    hw_state->pll8,
2366 		    hw_state->pll9,
2367 		    hw_state->pll10,
2368 		    hw_state->pcsdw12);
2369 }
2370 
2371 static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
2372 	.enable = bxt_ddi_pll_enable,
2373 	.disable = bxt_ddi_pll_disable,
2374 	.get_hw_state = bxt_ddi_pll_get_hw_state,
2375 	.get_freq = bxt_ddi_pll_get_freq,
2376 };
2377 
2378 static const struct dpll_info bxt_plls[] = {
2379 	{ "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
2380 	{ "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
2381 	{ "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
2382 	{ },
2383 };
2384 
2385 static const struct intel_dpll_mgr bxt_pll_mgr = {
2386 	.dpll_info = bxt_plls,
2387 	.compute_dplls = bxt_compute_dpll,
2388 	.get_dplls = bxt_get_dpll,
2389 	.put_dplls = intel_put_dpll,
2390 	.update_ref_clks = bxt_update_dpll_ref_clks,
2391 	.dump_hw_state = bxt_dump_hw_state,
2392 };
2393 
2394 static void icl_wrpll_get_multipliers(int bestdiv, int *pdiv,
2395 				      int *qdiv, int *kdiv)
2396 {
2397 	/* even dividers */
2398 	if (bestdiv % 2 == 0) {
2399 		if (bestdiv == 2) {
2400 			*pdiv = 2;
2401 			*qdiv = 1;
2402 			*kdiv = 1;
2403 		} else if (bestdiv % 4 == 0) {
2404 			*pdiv = 2;
2405 			*qdiv = bestdiv / 4;
2406 			*kdiv = 2;
2407 		} else if (bestdiv % 6 == 0) {
2408 			*pdiv = 3;
2409 			*qdiv = bestdiv / 6;
2410 			*kdiv = 2;
2411 		} else if (bestdiv % 5 == 0) {
2412 			*pdiv = 5;
2413 			*qdiv = bestdiv / 10;
2414 			*kdiv = 2;
2415 		} else if (bestdiv % 14 == 0) {
2416 			*pdiv = 7;
2417 			*qdiv = bestdiv / 14;
2418 			*kdiv = 2;
2419 		}
2420 	} else {
2421 		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
2422 			*pdiv = bestdiv;
2423 			*qdiv = 1;
2424 			*kdiv = 1;
2425 		} else { /* 9, 15, 21 */
2426 			*pdiv = bestdiv / 3;
2427 			*qdiv = 1;
2428 			*kdiv = 3;
2429 		}
2430 	}
2431 }
2432 
2433 static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
2434 				      u32 dco_freq, u32 ref_freq,
2435 				      int pdiv, int qdiv, int kdiv)
2436 {
2437 	u32 dco;
2438 
2439 	switch (kdiv) {
2440 	case 1:
2441 		params->kdiv = 1;
2442 		break;
2443 	case 2:
2444 		params->kdiv = 2;
2445 		break;
2446 	case 3:
2447 		params->kdiv = 4;
2448 		break;
2449 	default:
2450 		WARN(1, "Incorrect KDiv\n");
2451 	}
2452 
2453 	switch (pdiv) {
2454 	case 2:
2455 		params->pdiv = 1;
2456 		break;
2457 	case 3:
2458 		params->pdiv = 2;
2459 		break;
2460 	case 5:
2461 		params->pdiv = 4;
2462 		break;
2463 	case 7:
2464 		params->pdiv = 8;
2465 		break;
2466 	default:
2467 		WARN(1, "Incorrect PDiv\n");
2468 	}
2469 
2470 	WARN_ON(kdiv != 2 && qdiv != 1);
2471 
2472 	params->qdiv_ratio = qdiv;
2473 	params->qdiv_mode = (qdiv == 1) ? 0 : 1;
2474 
2475 	dco = div_u64((u64)dco_freq << 15, ref_freq);
2476 
2477 	params->dco_integer = dco >> 15;
2478 	params->dco_fraction = dco & 0x7fff;
2479 }
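/*
 * Worked example for icl_wrpll_params_populate() above (own arithmetic):
 * with dco_freq = 8100000 kHz and ref_freq = 24000 kHz, dco_freq / ref_freq
 * = 337.5, so dco_integer = 337 = 0x151 and dco_fraction = 0.5 * 2^15 =
 * 0x4000, matching the 8.1 GHz DCO entries in the 24 MHz table below.
 */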
2480 
2481 /*
2482  * Display WA #22010492432: ehl, tgl, adl-s, adl-p
2483  * Program half of the nominal DCO divider fraction value.
2484  */
2485 static bool
2486 ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
2487 {
2488 	return (((IS_ELKHARTLAKE(i915) || IS_JASPERLAKE(i915)) &&
2489 		 IS_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
2490 		 IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) &&
2491 		 i915->display.dpll.ref_clks.nssc == 38400;
2492 }
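/*
 * The halving is applied when programming in icl_calc_dpll_state() and
 * undone on readout in icl_ddi_combo_pll_get_freq(), so e.g. a nominal
 * dco_fraction of 0x4000 is written as 0x2000 and read back as 0x4000.
 */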
2493 
2494 struct icl_combo_pll_params {
2495 	int clock;
2496 	struct skl_wrpll_params wrpll;
2497 };
2498 
2499 /*
2500  * These values are already adjusted: they're the bits we write to the
2501  * registers, not the logical values.
2502  */
2503 static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
2504 	{ 540000,
2505 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
2506 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2507 	{ 270000,
2508 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
2509 	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2510 	{ 162000,
2511 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
2512 	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2513 	{ 324000,
2514 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
2515 	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2516 	{ 216000,
2517 	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2518 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2519 	{ 432000,
2520 	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2521 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2522 	{ 648000,
2523 	  { .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
2524 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2525 	{ 810000,
2526 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
2527 	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2528 };
2529 
2531 /* Also used for 38.4 MHz values. */
2532 static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
2533 	{ 540000,
2534 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
2535 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2536 	{ 270000,
2537 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
2538 	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2539 	{ 162000,
2540 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
2541 	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2542 	{ 324000,
2543 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
2544 	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2545 	{ 216000,
2546 	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2547 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2548 	{ 432000,
2549 	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2550 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2551 	{ 648000,
2552 	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
2553 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2554 	{ 810000,
2555 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
2556 	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2557 };
2558 
2559 static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
2560 	.dco_integer = 0x151, .dco_fraction = 0x4000,
2561 	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2562 };
2563 
2564 static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
2565 	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
2566 	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2567 };
2568 
2569 static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
2570 	.dco_integer = 0x54, .dco_fraction = 0x3000,
2571 	/* the following params are unused */
2572 	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
2573 };
2574 
2575 static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
2576 	.dco_integer = 0x43, .dco_fraction = 0x4000,
2577 	/* the following params are unused */
2578 };
2579 
2580 static int icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2581 				 struct skl_wrpll_params *pll_params)
2582 {
2583 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2584 	const struct icl_combo_pll_params *params =
2585 		i915->display.dpll.ref_clks.nssc == 24000 ?
2586 		icl_dp_combo_pll_24MHz_values :
2587 		icl_dp_combo_pll_19_2MHz_values;
2588 	int clock = crtc_state->port_clock;
2589 	int i;
2590 
2591 	for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
2592 		if (clock == params[i].clock) {
2593 			*pll_params = params[i].wrpll;
2594 			return 0;
2595 		}
2596 	}
2597 
2598 	MISSING_CASE(clock);
2599 	return -EINVAL;
2600 }
2601 
2602 static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
2603 			    struct skl_wrpll_params *pll_params)
2604 {
2605 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2606 
2607 	if (DISPLAY_VER(i915) >= 12) {
2608 		switch (i915->display.dpll.ref_clks.nssc) {
2609 		default:
2610 			MISSING_CASE(i915->display.dpll.ref_clks.nssc);
2611 			fallthrough;
2612 		case 19200:
2613 		case 38400:
2614 			*pll_params = tgl_tbt_pll_19_2MHz_values;
2615 			break;
2616 		case 24000:
2617 			*pll_params = tgl_tbt_pll_24MHz_values;
2618 			break;
2619 		}
2620 	} else {
2621 		switch (i915->display.dpll.ref_clks.nssc) {
2622 		default:
2623 			MISSING_CASE(i915->display.dpll.ref_clks.nssc);
2624 			fallthrough;
2625 		case 19200:
2626 		case 38400:
2627 			*pll_params = icl_tbt_pll_19_2MHz_values;
2628 			break;
2629 		case 24000:
2630 			*pll_params = icl_tbt_pll_24MHz_values;
2631 			break;
2632 		}
2633 	}
2634 
2635 	return 0;
2636 }
2637 
2638 static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
2639 				    const struct intel_shared_dpll *pll,
2640 				    const struct intel_dpll_hw_state *pll_state)
2641 {
2642 	/*
2643 	 * The PLL outputs multiple frequencies at the same time; the selection is
2644 	 * made at the DDI clock mux level.
2645 	 */
2646 	drm_WARN_ON(&i915->drm, 1);
2647 
2648 	return 0;
2649 }
2650 
2651 static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
2652 {
2653 	int ref_clock = i915->display.dpll.ref_clks.nssc;
2654 
2655 	/*
2656 	 * For ICL+, the spec states: if reference frequency is 38.4,
2657 	 * use 19.2 because the DPLL automatically divides that by 2.
2658 	 */
2659 	if (ref_clock == 38400)
2660 		ref_clock = 19200;
2661 
2662 	return ref_clock;
2663 }
2664 
2665 static int
2666 icl_calc_wrpll(struct intel_crtc_state *crtc_state,
2667 	       struct skl_wrpll_params *wrpll_params)
2668 {
2669 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2670 	int ref_clock = icl_wrpll_ref_clock(i915);
2671 	u32 afe_clock = crtc_state->port_clock * 5;
2672 	u32 dco_min = 7998000;
2673 	u32 dco_max = 10000000;
2674 	u32 dco_mid = (dco_min + dco_max) / 2;
2675 	static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
2676 					 18, 20, 24, 28, 30, 32,  36,  40,
2677 					 42, 44, 48, 50, 52, 54,  56,  60,
2678 					 64, 66, 68, 70, 72, 76,  78,  80,
2679 					 84, 88, 90, 92, 96, 98, 100, 102,
2680 					  3,  5,  7,  9, 15, 21 };
2681 	u32 dco, best_dco = 0, dco_centrality = 0;
2682 	u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2683 	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2684 
2685 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2686 		dco = afe_clock * dividers[d];
2687 
2688 		if (dco <= dco_max && dco >= dco_min) {
2689 			dco_centrality = abs(dco - dco_mid);
2690 
2691 			if (dco_centrality < best_dco_centrality) {
2692 				best_dco_centrality = dco_centrality;
2693 				best_div = dividers[d];
2694 				best_dco = dco;
2695 			}
2696 		}
2697 	}
2698 
2699 	if (best_div == 0)
2700 		return -EINVAL;
2701 
2702 	icl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2703 	icl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
2704 				  pdiv, qdiv, kdiv);
2705 
2706 	return 0;
2707 }
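/*
 * Worked example for icl_calc_wrpll() above (own arithmetic): for HDMI with
 * port_clock = 594000 kHz, afe_clock = 2970000 kHz and only divider 3 puts
 * the DCO in range (2970000 * 3 = 8910000 kHz), giving pdiv = 3, qdiv = 1,
 * kdiv = 1.
 */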
2708 
2709 static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
2710 				      const struct intel_shared_dpll *pll,
2711 				      const struct intel_dpll_hw_state *pll_state)
2712 {
2713 	int ref_clock = icl_wrpll_ref_clock(i915);
2714 	u32 dco_fraction;
2715 	u32 p0, p1, p2, dco_freq;
2716 
2717 	p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
2718 	p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
2719 
2720 	if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
2721 		p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
2722 			DPLL_CFGCR1_QDIV_RATIO_SHIFT;
2723 	else
2724 		p1 = 1;
2725 
2726 	switch (p0) {
2727 	case DPLL_CFGCR1_PDIV_2:
2728 		p0 = 2;
2729 		break;
2730 	case DPLL_CFGCR1_PDIV_3:
2731 		p0 = 3;
2732 		break;
2733 	case DPLL_CFGCR1_PDIV_5:
2734 		p0 = 5;
2735 		break;
2736 	case DPLL_CFGCR1_PDIV_7:
2737 		p0 = 7;
2738 		break;
2739 	}
2740 
2741 	switch (p2) {
2742 	case DPLL_CFGCR1_KDIV_1:
2743 		p2 = 1;
2744 		break;
2745 	case DPLL_CFGCR1_KDIV_2:
2746 		p2 = 2;
2747 		break;
2748 	case DPLL_CFGCR1_KDIV_3:
2749 		p2 = 3;
2750 		break;
2751 	}
2752 
2753 	dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
2754 		   ref_clock;
2755 
2756 	dco_fraction = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
2757 		       DPLL_CFGCR0_DCO_FRACTION_SHIFT;
2758 
2759 	if (ehl_combo_pll_div_frac_wa_needed(i915))
2760 		dco_fraction *= 2;
2761 
2762 	dco_freq += (dco_fraction * ref_clock) / 0x8000;
2763 
2764 	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
2765 		return 0;
2766 
2767 	return dco_freq / (p0 * p1 * p2 * 5);
2768 }
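/*
 * Readback example for icl_ddi_combo_pll_get_freq() above (own arithmetic):
 * dco_integer = 0x151, dco_fraction = 0x4000 with a 24 MHz reference gives
 * dco_freq = 337 * 24000 + (0x4000 * 24000) / 0x8000 = 8100000 kHz; with
 * p0 = 3, p1 = 1, p2 = 1 this returns 8100000 / (3 * 1 * 1 * 5) = 540000.
 */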
2769 
2770 static void icl_calc_dpll_state(struct drm_i915_private *i915,
2771 				const struct skl_wrpll_params *pll_params,
2772 				struct intel_dpll_hw_state *pll_state)
2773 {
2774 	u32 dco_fraction = pll_params->dco_fraction;
2775 
2776 	if (ehl_combo_pll_div_frac_wa_needed(i915))
2777 		dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);
2778 
2779 	pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
2780 			    pll_params->dco_integer;
2781 
2782 	pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
2783 			    DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
2784 			    DPLL_CFGCR1_KDIV(pll_params->kdiv) |
2785 			    DPLL_CFGCR1_PDIV(pll_params->pdiv);
2786 
2787 	if (DISPLAY_VER(i915) >= 12)
2788 		pll_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
2789 	else
2790 		pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
2791 
2792 	if (i915->display.vbt.override_afc_startup)
2793 		pll_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->display.vbt.override_afc_startup_val);
2794 }
2795 
2796 static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
2797 				    u32 *target_dco_khz,
2798 				    struct intel_dpll_hw_state *state,
2799 				    bool is_dkl)
2800 {
2801 	static const u8 div1_vals[] = { 7, 5, 3, 2 };
2802 	u32 dco_min_freq, dco_max_freq;
2803 	unsigned int i;
2804 	int div2;
2805 
2806 	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
2807 	dco_max_freq = is_dp ? 8100000 : 10000000;
2808 
2809 	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
2810 		int div1 = div1_vals[i];
2811 
2812 		for (div2 = 10; div2 > 0; div2--) {
2813 			int dco = div1 * div2 * clock_khz * 5;
2814 			int a_divratio, tlinedrv, inputsel;
2815 			u32 hsdiv;
2816 
2817 			if (dco < dco_min_freq || dco > dco_max_freq)
2818 				continue;
2819 
2820 			if (div2 >= 2) {
2821 				/*
2822 				 * Note: a_divratio does not match the TGL BSpec
2823 				 * algorithm, but matches the hardcoded values and
2824 				 * works on HW, at least for DP alt-mode.
2825 				 */
2826 				a_divratio = is_dp ? 10 : 5;
2827 				tlinedrv = is_dkl ? 1 : 2;
2828 			} else {
2829 				a_divratio = 5;
2830 				tlinedrv = 0;
2831 			}
2832 			inputsel = is_dp ? 0 : 1;
2833 
2834 			switch (div1) {
2835 			default:
2836 				MISSING_CASE(div1);
2837 				fallthrough;
2838 			case 2:
2839 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
2840 				break;
2841 			case 3:
2842 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
2843 				break;
2844 			case 5:
2845 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
2846 				break;
2847 			case 7:
2848 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
2849 				break;
2850 			}
2851 
2852 			*target_dco_khz = dco;
2853 
2854 			state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
2855 
2856 			state->mg_clktop2_coreclkctl1 =
2857 				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
2858 
2859 			state->mg_clktop2_hsclkctl =
2860 				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
2861 				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
2862 				hsdiv |
2863 				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
2864 
2865 			return 0;
2866 		}
2867 	}
2868 
2869 	return -EINVAL;
2870 }
2871 
2872 /*
2873  * The specification for this function uses real numbers, so the math had to be
2874  * adapted to integer-only calculation; that's why it looks so different.
2875  */
2876 static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
2877 				 struct intel_dpll_hw_state *pll_state)
2878 {
2879 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2880 	int refclk_khz = i915->display.dpll.ref_clks.nssc;
2881 	int clock = crtc_state->port_clock;
2882 	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
2883 	u32 iref_ndiv, iref_trim, iref_pulse_w;
2884 	u32 prop_coeff, int_coeff;
2885 	u32 tdc_targetcnt, feedfwgain;
2886 	u64 ssc_stepsize, ssc_steplen, ssc_steplog;
2887 	u64 tmp;
2888 	bool use_ssc = false;
2889 	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
2890 	bool is_dkl = DISPLAY_VER(i915) >= 12;
2891 	int ret;
2892 
2893 	ret = icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
2894 				       pll_state, is_dkl);
2895 	if (ret)
2896 		return ret;
2897 
2898 	m1div = 2;
2899 	m2div_int = dco_khz / (refclk_khz * m1div);
2900 	if (m2div_int > 255) {
2901 		if (!is_dkl) {
2902 			m1div = 4;
2903 			m2div_int = dco_khz / (refclk_khz * m1div);
2904 		}
2905 
2906 		if (m2div_int > 255)
2907 			return -EINVAL;
2908 	}
2909 	m2div_rem = dco_khz % (refclk_khz * m1div);
2910 
2911 	tmp = (u64)m2div_rem * (1 << 22);
2912 	do_div(tmp, refclk_khz * m1div);
2913 	m2div_frac = tmp;
2914 
2915 	switch (refclk_khz) {
2916 	case 19200:
2917 		iref_ndiv = 1;
2918 		iref_trim = 28;
2919 		iref_pulse_w = 1;
2920 		break;
2921 	case 24000:
2922 		iref_ndiv = 1;
2923 		iref_trim = 25;
2924 		iref_pulse_w = 2;
2925 		break;
2926 	case 38400:
2927 		iref_ndiv = 2;
2928 		iref_trim = 28;
2929 		iref_pulse_w = 1;
2930 		break;
2931 	default:
2932 		MISSING_CASE(refclk_khz);
2933 		return -EINVAL;
2934 	}
2935 
2936 	/*
2937 	 * tdc_res = 0.000003
2938 	 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
2939 	 *
2940 	 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
2941 	 * was supposed to be a division, but we rearranged the operations of
2942 	 * the formula to avoid early divisions so we don't multiply the
2943 	 * rounding errors.
2944 	 *
2945 	 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
2946 	 * we also rearrange to work with integers.
2947 	 *
2948 	 * The 0.5 transformed to 5 results in a multiplication by 10 and the
2949 	 * last division by 10.
2950 	 */
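	/*
	 * For example (own arithmetic), refclk_khz = 24000 gives
	 * (2 * 1000 * 100000 * 10 / (132 * 24000) + 5) / 10 = (631 + 5) / 10 = 63.
	 */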
2951 	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
2952 
2953 	/*
2954 	 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
2955 	 * 32 bits. That's not a problem since we round the division down
2956 	 * anyway.
2957 	 */
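	/*
	 * E.g. (own arithmetic) dco_khz = 8100000 and m1div = 2 give
	 * 2 * 1000000 * 100 / (8100000 * 3 / 10) = 200000000 / 2430000 = 82.
	 */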
2958 	feedfwgain = (use_ssc || m2div_rem > 0) ?
2959 		m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
2960 
2961 	if (dco_khz >= 9000000) {
2962 		prop_coeff = 5;
2963 		int_coeff = 10;
2964 	} else {
2965 		prop_coeff = 4;
2966 		int_coeff = 8;
2967 	}
2968 
2969 	if (use_ssc) {
2970 		tmp = mul_u32_u32(dco_khz, 47 * 32);
2971 		do_div(tmp, refclk_khz * m1div * 10000);
2972 		ssc_stepsize = tmp;
2973 
2974 		tmp = mul_u32_u32(dco_khz, 1000);
2975 		ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
2976 	} else {
2977 		ssc_stepsize = 0;
2978 		ssc_steplen = 0;
2979 	}
2980 	ssc_steplog = 4;
2981 
2982 	/* write pll_state calculations */
2983 	if (is_dkl) {
2984 		pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
2985 					 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
2986 					 DKL_PLL_DIV0_FBPREDIV(m1div) |
2987 					 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
2988 		if (i915->display.vbt.override_afc_startup) {
2989 			u8 val = i915->display.vbt.override_afc_startup_val;
2990 
2991 			pll_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val);
2992 		}
2993 
2994 		pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
2995 					 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
2996 
2997 		pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
2998 					DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
2999 					DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
3000 					(use_ssc ? DKL_PLL_SSC_EN : 0);
3001 
3002 		pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
3003 					  DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
3004 
3005 		pll_state->mg_pll_tdc_coldst_bias =
3006 				DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
3007 				DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
3008 
3009 	} else {
3010 		pll_state->mg_pll_div0 =
3011 			(m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
3012 			MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
3013 			MG_PLL_DIV0_FBDIV_INT(m2div_int);
3014 
3015 		pll_state->mg_pll_div1 =
3016 			MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
3017 			MG_PLL_DIV1_DITHER_DIV_2 |
3018 			MG_PLL_DIV1_NDIVRATIO(1) |
3019 			MG_PLL_DIV1_FBPREDIV(m1div);
3020 
3021 		pll_state->mg_pll_lf =
3022 			MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
3023 			MG_PLL_LF_AFCCNTSEL_512 |
3024 			MG_PLL_LF_GAINCTRL(1) |
3025 			MG_PLL_LF_INT_COEFF(int_coeff) |
3026 			MG_PLL_LF_PROP_COEFF(prop_coeff);
3027 
3028 		pll_state->mg_pll_frac_lock =
3029 			MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
3030 			MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
3031 			MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
3032 			MG_PLL_FRAC_LOCK_DCODITHEREN |
3033 			MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
3034 		if (use_ssc || m2div_rem > 0)
3035 			pll_state->mg_pll_frac_lock |=
3036 				MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
3037 
3038 		pll_state->mg_pll_ssc =
3039 			(use_ssc ? MG_PLL_SSC_EN : 0) |
3040 			MG_PLL_SSC_TYPE(2) |
3041 			MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
3042 			MG_PLL_SSC_STEPNUM(ssc_steplog) |
3043 			MG_PLL_SSC_FLLEN |
3044 			MG_PLL_SSC_STEPSIZE(ssc_stepsize);
3045 
3046 		pll_state->mg_pll_tdc_coldst_bias =
3047 			MG_PLL_TDC_COLDST_COLDSTART |
3048 			MG_PLL_TDC_COLDST_IREFINT_EN |
3049 			MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
3050 			MG_PLL_TDC_TDCOVCCORR_EN |
3051 			MG_PLL_TDC_TDCSEL(3);
3052 
3053 		pll_state->mg_pll_bias =
3054 			MG_PLL_BIAS_BIAS_GB_SEL(3) |
3055 			MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
3056 			MG_PLL_BIAS_BIAS_BONUS(10) |
3057 			MG_PLL_BIAS_BIASCAL_EN |
3058 			MG_PLL_BIAS_CTRIM(12) |
3059 			MG_PLL_BIAS_VREF_RDAC(4) |
3060 			MG_PLL_BIAS_IREFTRIM(iref_trim);
3061 
3062 		if (refclk_khz == 38400) {
3063 			pll_state->mg_pll_tdc_coldst_bias_mask =
3064 				MG_PLL_TDC_COLDST_COLDSTART;
3065 			pll_state->mg_pll_bias_mask = 0;
3066 		} else {
3067 			pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
3068 			pll_state->mg_pll_bias_mask = -1U;
3069 		}
3070 
3071 		pll_state->mg_pll_tdc_coldst_bias &=
3072 			pll_state->mg_pll_tdc_coldst_bias_mask;
3073 		pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
3074 	}
3075 
3076 	return 0;
3077 }
3078 
3079 static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *i915,
3080 				   const struct intel_shared_dpll *pll,
3081 				   const struct intel_dpll_hw_state *pll_state)
3082 {
3083 	u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
3084 	u64 tmp;
3085 
3086 	ref_clock = i915->display.dpll.ref_clks.nssc;
3087 
3088 	if (DISPLAY_VER(i915) >= 12) {
3089 		m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
3090 		m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
3091 		m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
3092 
3093 		if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
3094 			m2_frac = pll_state->mg_pll_bias &
3095 				  DKL_PLL_BIAS_FBDIV_FRAC_MASK;
3096 			m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
3097 		} else {
3098 			m2_frac = 0;
3099 		}
3100 	} else {
3101 		m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
3102 		m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
3103 
3104 		if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
3105 			m2_frac = pll_state->mg_pll_div0 &
3106 				  MG_PLL_DIV0_FBDIV_FRAC_MASK;
3107 			m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
3108 		} else {
3109 			m2_frac = 0;
3110 		}
3111 	}
3112 
3113 	switch (pll_state->mg_clktop2_hsclkctl &
3114 		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
3115 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
3116 		div1 = 2;
3117 		break;
3118 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
3119 		div1 = 3;
3120 		break;
3121 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
3122 		div1 = 5;
3123 		break;
3124 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
3125 		div1 = 7;
3126 		break;
3127 	default:
3128 		MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
3129 		return 0;
3130 	}
3131 
3132 	div2 = (pll_state->mg_clktop2_hsclkctl &
3133 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
3134 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;
3135 
3136 	/* a div2 value of 0 is the same as 1, i.e. no division */
3137 	if (div2 == 0)
3138 		div2 = 1;
3139 
3140 	/*
3141 	 * Adjust the original formula to delay the division by 2^22 in order to
3142 	 * minimize possible rounding errors.
3143 	 */
3144 	tmp = (u64)m1 * m2_int * ref_clock +
3145 	      (((u64)m1 * m2_frac * ref_clock) >> 22);
3146 	tmp = div_u64(tmp, 5 * div1 * div2);
3147 
3148 	return tmp;
3149 }
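/*
 * Readback example for icl_ddi_mg_pll_get_freq() above (own arithmetic),
 * reversing icl_calc_mg_pll_state() for a 270000 kHz DP link at 38.4 MHz:
 * m1 = 2, m2_int = 105, m2_frac = 1966080, div1 = 3, div2 = 2, so
 * tmp = 2 * 105 * 38400 + ((2 * 1966080 * 38400) >> 22)
 *     = 8064000 + 36000 = 8100000, and 8100000 / (5 * 3 * 2) = 270000.
 */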
3150 
3151 /**
3152  * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
3153  * @crtc_state: state for the CRTC to select the DPLL for
3154  * @port_dpll_id: the active @port_dpll_id to select
3155  *
3156  * Select the given @port_dpll_id instance from the DPLLs reserved for the
3157  * CRTC.
3158  */
3159 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
3160 			      enum icl_port_dpll_id port_dpll_id)
3161 {
3162 	struct icl_port_dpll *port_dpll =
3163 		&crtc_state->icl_port_dplls[port_dpll_id];
3164 
3165 	crtc_state->shared_dpll = port_dpll->pll;
3166 	crtc_state->dpll_hw_state = port_dpll->hw_state;
3167 }
3168 
3169 static void icl_update_active_dpll(struct intel_atomic_state *state,
3170 				   struct intel_crtc *crtc,
3171 				   struct intel_encoder *encoder)
3172 {
3173 	struct intel_crtc_state *crtc_state =
3174 		intel_atomic_get_new_crtc_state(state, crtc);
3175 	struct intel_digital_port *primary_port;
3176 	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3177 
3178 	primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3179 		enc_to_mst(encoder)->primary :
3180 		enc_to_dig_port(encoder);
3181 
3182 	if (primary_port &&
3183 	    (intel_tc_port_in_dp_alt_mode(primary_port) ||
3184 	     intel_tc_port_in_legacy_mode(primary_port)))
3185 		port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3186 
3187 	icl_set_active_port_dpll(crtc_state, port_dpll_id);
3188 }
3189 
3190 static int icl_compute_combo_phy_dpll(struct intel_atomic_state *state,
3191 				      struct intel_crtc *crtc)
3192 {
3193 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
3194 	struct intel_crtc_state *crtc_state =
3195 		intel_atomic_get_new_crtc_state(state, crtc);
3196 	struct icl_port_dpll *port_dpll =
3197 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3198 	struct skl_wrpll_params pll_params = {};
3199 	int ret;
3200 
3201 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
3202 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
3203 		ret = icl_calc_wrpll(crtc_state, &pll_params);
3204 	else
3205 		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
3206 
3207 	if (ret)
3208 		return ret;
3209 
3210 	icl_calc_dpll_state(i915, &pll_params, &port_dpll->hw_state);
3211 
3212 	/* this is mainly for the fastset check */
3213 	icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
3214 
3215 	crtc_state->port_clock = icl_ddi_combo_pll_get_freq(i915, NULL,
3216 							    &port_dpll->hw_state);
3217 
3218 	return 0;
3219 }
3220 
3221 static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
3222 				  struct intel_crtc *crtc,
3223 				  struct intel_encoder *encoder)
3224 {
3225 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
3226 	struct intel_crtc_state *crtc_state =
3227 		intel_atomic_get_new_crtc_state(state, crtc);
3228 	struct icl_port_dpll *port_dpll =
3229 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3230 	enum port port = encoder->port;
3231 	unsigned long dpll_mask;
3232 
3233 	if (IS_ALDERLAKE_S(i915)) {
3234 		dpll_mask =
3235 			BIT(DPLL_ID_DG1_DPLL3) |
3236 			BIT(DPLL_ID_DG1_DPLL2) |
3237 			BIT(DPLL_ID_ICL_DPLL1) |
3238 			BIT(DPLL_ID_ICL_DPLL0);
3239 	} else if (IS_DG1(i915)) {
3240 		if (port == PORT_D || port == PORT_E) {
3241 			dpll_mask =
3242 				BIT(DPLL_ID_DG1_DPLL2) |
3243 				BIT(DPLL_ID_DG1_DPLL3);
3244 		} else {
3245 			dpll_mask =
3246 				BIT(DPLL_ID_DG1_DPLL0) |
3247 				BIT(DPLL_ID_DG1_DPLL1);
3248 		}
3249 	} else if (IS_ROCKETLAKE(i915)) {
3250 		dpll_mask =
3251 			BIT(DPLL_ID_EHL_DPLL4) |
3252 			BIT(DPLL_ID_ICL_DPLL1) |
3253 			BIT(DPLL_ID_ICL_DPLL0);
3254 	} else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
3255 		   port != PORT_A) {
3256 		dpll_mask =
3257 			BIT(DPLL_ID_EHL_DPLL4) |
3258 			BIT(DPLL_ID_ICL_DPLL1) |
3259 			BIT(DPLL_ID_ICL_DPLL0);
3260 	} else {
3261 		dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
3262 	}
3263 
3264 	/* Eliminate DPLLs from consideration if reserved by HTI */
3265 	dpll_mask &= ~intel_hti_dpll_mask(i915);
3266 
3267 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3268 						&port_dpll->hw_state,
3269 						dpll_mask);
3270 	if (!port_dpll->pll)
3271 		return -EINVAL;
3272 
3273 	intel_reference_shared_dpll(state, crtc,
3274 				    port_dpll->pll, &port_dpll->hw_state);
3275 
3276 	icl_update_active_dpll(state, crtc, encoder);
3277 
3278 	return 0;
3279 }
3280 
3281 static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
3282 				    struct intel_crtc *crtc)
3283 {
3284 	struct drm_i915_private *i915 = to_i915(state->base.dev);
3285 	struct intel_crtc_state *crtc_state =
3286 		intel_atomic_get_new_crtc_state(state, crtc);
3287 	struct icl_port_dpll *port_dpll =
3288 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3289 	struct skl_wrpll_params pll_params = {};
3290 	int ret;
3291 
3292 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3293 	ret = icl_calc_tbt_pll(crtc_state, &pll_params);
3294 	if (ret)
3295 		return ret;
3296 
3297 	icl_calc_dpll_state(i915, &pll_params, &port_dpll->hw_state);
3298 
3299 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3300 	ret = icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state);
3301 	if (ret)
3302 		return ret;
3303 
3304 	/* this is mainly for the fastset check */
3305 	icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);
3306 
3307 	crtc_state->port_clock = icl_ddi_mg_pll_get_freq(i915, NULL,
3308 							 &port_dpll->hw_state);
3309 
3310 	return 0;
3311 }
3312 
3313 static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
3314 				struct intel_crtc *crtc,
3315 				struct intel_encoder *encoder)
3316 {
3317 	struct drm_i915_private *i915 = to_i915(state->base.dev);
3318 	struct intel_crtc_state *crtc_state =
3319 		intel_atomic_get_new_crtc_state(state, crtc);
3320 	struct icl_port_dpll *port_dpll =
3321 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3322 	enum intel_dpll_id dpll_id;
3323 	int ret;
3324 
3325 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3326 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3327 						&port_dpll->hw_state,
3328 						BIT(DPLL_ID_ICL_TBTPLL));
3329 	if (!port_dpll->pll)
3330 		return -EINVAL;
3331 	intel_reference_shared_dpll(state, crtc,
3332 				    port_dpll->pll, &port_dpll->hw_state);
3333 
3335 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3336 	dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(i915,
3337 							 encoder->port));
3338 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3339 						&port_dpll->hw_state,
3340 						BIT(dpll_id));
3341 	if (!port_dpll->pll) {
3342 		ret = -EINVAL;
3343 		goto err_unreference_tbt_pll;
3344 	}
3345 	intel_reference_shared_dpll(state, crtc,
3346 				    port_dpll->pll, &port_dpll->hw_state);
3347 
3348 	icl_update_active_dpll(state, crtc, encoder);
3349 
3350 	return 0;
3351 
3352 err_unreference_tbt_pll:
3353 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3354 	intel_unreference_shared_dpll(state, crtc, port_dpll->pll);
3355 
3356 	return ret;
3357 }
3358 
3359 static int icl_compute_dplls(struct intel_atomic_state *state,
3360 			     struct intel_crtc *crtc,
3361 			     struct intel_encoder *encoder)
3362 {
3363 	struct drm_i915_private *i915 = to_i915(state->base.dev);
3364 	enum phy phy = intel_port_to_phy(i915, encoder->port);
3365 
3366 	if (intel_phy_is_combo(i915, phy))
3367 		return icl_compute_combo_phy_dpll(state, crtc);
3368 	else if (intel_phy_is_tc(i915, phy))
3369 		return icl_compute_tc_phy_dplls(state, crtc);
3370 
3371 	MISSING_CASE(phy);
3372 
3373 	return 0;
3374 }
3375 
3376 static int icl_get_dplls(struct intel_atomic_state *state,
3377 			 struct intel_crtc *crtc,
3378 			 struct intel_encoder *encoder)
3379 {
3380 	struct drm_i915_private *i915 = to_i915(state->base.dev);
3381 	enum phy phy = intel_port_to_phy(i915, encoder->port);
3382 
3383 	if (intel_phy_is_combo(i915, phy))
3384 		return icl_get_combo_phy_dpll(state, crtc, encoder);
3385 	else if (intel_phy_is_tc(i915, phy))
3386 		return icl_get_tc_phy_dplls(state, crtc, encoder);
3387 
3388 	MISSING_CASE(phy);
3389 
3390 	return -EINVAL;
3391 }
3392 
3393 static void icl_put_dplls(struct intel_atomic_state *state,
3394 			  struct intel_crtc *crtc)
3395 {
3396 	const struct intel_crtc_state *old_crtc_state =
3397 		intel_atomic_get_old_crtc_state(state, crtc);
3398 	struct intel_crtc_state *new_crtc_state =
3399 		intel_atomic_get_new_crtc_state(state, crtc);
3400 	enum icl_port_dpll_id id;
3401 
3402 	new_crtc_state->shared_dpll = NULL;
3403 
3404 	for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3405 		const struct icl_port_dpll *old_port_dpll =
3406 			&old_crtc_state->icl_port_dplls[id];
3407 		struct icl_port_dpll *new_port_dpll =
3408 			&new_crtc_state->icl_port_dplls[id];
3409 
3410 		new_port_dpll->pll = NULL;
3411 
3412 		if (!old_port_dpll->pll)
3413 			continue;
3414 
3415 		intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3416 	}
3417 }
3418 
3419 static bool mg_pll_get_hw_state(struct drm_i915_private *i915,
3420 				struct intel_shared_dpll *pll,
3421 				struct intel_dpll_hw_state *hw_state)
3422 {
3423 	const enum intel_dpll_id id = pll->info->id;
3424 	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3425 	intel_wakeref_t wakeref;
3426 	bool ret = false;
3427 	u32 val;
3428 
3429 	i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll);
3430 
3431 	wakeref = intel_display_power_get_if_enabled(i915,
3432 						     POWER_DOMAIN_DISPLAY_CORE);
3433 	if (!wakeref)
3434 		return false;
3435 
3436 	val = intel_de_read(i915, enable_reg);
3437 	if (!(val & PLL_ENABLE))
3438 		goto out;
3439 
3440 	hw_state->mg_refclkin_ctl = intel_de_read(i915,
3441 						  MG_REFCLKIN_CTL(tc_port));
3442 	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3443 
3444 	hw_state->mg_clktop2_coreclkctl1 =
3445 		intel_de_read(i915, MG_CLKTOP2_CORECLKCTL1(tc_port));
3446 	hw_state->mg_clktop2_coreclkctl1 &=
3447 		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3448 
3449 	hw_state->mg_clktop2_hsclkctl =
3450 		intel_de_read(i915, MG_CLKTOP2_HSCLKCTL(tc_port));
3451 	hw_state->mg_clktop2_hsclkctl &=
3452 		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3453 		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3454 		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3455 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3456 
3457 	hw_state->mg_pll_div0 = intel_de_read(i915, MG_PLL_DIV0(tc_port));
3458 	hw_state->mg_pll_div1 = intel_de_read(i915, MG_PLL_DIV1(tc_port));
3459 	hw_state->mg_pll_lf = intel_de_read(i915, MG_PLL_LF(tc_port));
3460 	hw_state->mg_pll_frac_lock = intel_de_read(i915,
3461 						   MG_PLL_FRAC_LOCK(tc_port));
3462 	hw_state->mg_pll_ssc = intel_de_read(i915, MG_PLL_SSC(tc_port));
3463 
3464 	hw_state->mg_pll_bias = intel_de_read(i915, MG_PLL_BIAS(tc_port));
3465 	hw_state->mg_pll_tdc_coldst_bias =
3466 		intel_de_read(i915, MG_PLL_TDC_COLDST_BIAS(tc_port));
3467 
3468 	if (i915->display.dpll.ref_clks.nssc == 38400) {
3469 		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
3470 		hw_state->mg_pll_bias_mask = 0;
3471 	} else {
3472 		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
3473 		hw_state->mg_pll_bias_mask = -1U;
3474 	}
3475 
3476 	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
3477 	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
3478 
3479 	ret = true;
3480 out:
3481 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3482 	return ret;
3483 }
3484 
3485 static bool dkl_pll_get_hw_state(struct drm_i915_private *i915,
3486 				 struct intel_shared_dpll *pll,
3487 				 struct intel_dpll_hw_state *hw_state)
3488 {
3489 	const enum intel_dpll_id id = pll->info->id;
3490 	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3491 	intel_wakeref_t wakeref;
3492 	bool ret = false;
3493 	u32 val;
3494 
3495 	wakeref = intel_display_power_get_if_enabled(i915,
3496 						     POWER_DOMAIN_DISPLAY_CORE);
3497 	if (!wakeref)
3498 		return false;
3499 
3500 	val = intel_de_read(i915, intel_tc_pll_enable_reg(i915, pll));
3501 	if (!(val & PLL_ENABLE))
3502 		goto out;
3503 
3504 	/*
3505 	 * All registers read here have the same HIP_INDEX_REG even though
3506 	 * they are on different building blocks
3507 	 */
3508 	hw_state->mg_refclkin_ctl = intel_dkl_phy_read(i915,
3509 						       DKL_REFCLKIN_CTL(tc_port));
3510 	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3511 
3512 	hw_state->mg_clktop2_hsclkctl =
3513 		intel_dkl_phy_read(i915, DKL_CLKTOP2_HSCLKCTL(tc_port));
3514 	hw_state->mg_clktop2_hsclkctl &=
3515 		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3516 		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3517 		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3518 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3519 
3520 	hw_state->mg_clktop2_coreclkctl1 =
3521 		intel_dkl_phy_read(i915, DKL_CLKTOP2_CORECLKCTL1(tc_port));
3522 	hw_state->mg_clktop2_coreclkctl1 &=
3523 		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3524 
3525 	hw_state->mg_pll_div0 = intel_dkl_phy_read(i915, DKL_PLL_DIV0(tc_port));
3526 	val = DKL_PLL_DIV0_MASK;
3527 	if (i915->display.vbt.override_afc_startup)
3528 		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
3529 	hw_state->mg_pll_div0 &= val;
3530 
3531 	hw_state->mg_pll_div1 = intel_dkl_phy_read(i915, DKL_PLL_DIV1(tc_port));
3532 	hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
3533 				  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
3534 
3535 	hw_state->mg_pll_ssc = intel_dkl_phy_read(i915, DKL_PLL_SSC(tc_port));
3536 	hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
3537 				 DKL_PLL_SSC_STEP_LEN_MASK |
3538 				 DKL_PLL_SSC_STEP_NUM_MASK |
3539 				 DKL_PLL_SSC_EN);
3540 
3541 	hw_state->mg_pll_bias = intel_dkl_phy_read(i915, DKL_PLL_BIAS(tc_port));
3542 	hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
3543 				  DKL_PLL_BIAS_FBDIV_FRAC_MASK);
3544 
3545 	hw_state->mg_pll_tdc_coldst_bias =
3546 		intel_dkl_phy_read(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3547 	hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
3548 					     DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
3549 
3550 	ret = true;
3551 out:
3552 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3553 	return ret;
3554 }
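
/*
 * The field masks applied during the DKL readout above mirror the ones used
 * by dkl_pll_write(), which is what lets verify_single_dpll_state() memcmp()
 * the software and readout states directly.  E.g. the AFC startup bits are
 * only kept (and only written) when the VBT asks for an override:
 *
 *	val = DKL_PLL_DIV0_MASK;
 *	if (i915->display.vbt.override_afc_startup)
 *		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
 */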
3555 
3556 static bool icl_pll_get_hw_state(struct drm_i915_private *i915,
3557 				 struct intel_shared_dpll *pll,
3558 				 struct intel_dpll_hw_state *hw_state,
3559 				 i915_reg_t enable_reg)
3560 {
3561 	const enum intel_dpll_id id = pll->info->id;
3562 	intel_wakeref_t wakeref;
3563 	bool ret = false;
3564 	u32 val;
3565 
3566 	wakeref = intel_display_power_get_if_enabled(i915,
3567 						     POWER_DOMAIN_DISPLAY_CORE);
3568 	if (!wakeref)
3569 		return false;
3570 
3571 	val = intel_de_read(i915, enable_reg);
3572 	if (!(val & PLL_ENABLE))
3573 		goto out;
3574 
3575 	if (IS_ALDERLAKE_S(i915)) {
3576 		hw_state->cfgcr0 = intel_de_read(i915, ADLS_DPLL_CFGCR0(id));
3577 		hw_state->cfgcr1 = intel_de_read(i915, ADLS_DPLL_CFGCR1(id));
3578 	} else if (IS_DG1(i915)) {
3579 		hw_state->cfgcr0 = intel_de_read(i915, DG1_DPLL_CFGCR0(id));
3580 		hw_state->cfgcr1 = intel_de_read(i915, DG1_DPLL_CFGCR1(id));
3581 	} else if (IS_ROCKETLAKE(i915)) {
3582 		hw_state->cfgcr0 = intel_de_read(i915,
3583 						 RKL_DPLL_CFGCR0(id));
3584 		hw_state->cfgcr1 = intel_de_read(i915,
3585 						 RKL_DPLL_CFGCR1(id));
3586 	} else if (DISPLAY_VER(i915) >= 12) {
3587 		hw_state->cfgcr0 = intel_de_read(i915,
3588 						 TGL_DPLL_CFGCR0(id));
3589 		hw_state->cfgcr1 = intel_de_read(i915,
3590 						 TGL_DPLL_CFGCR1(id));
3591 		if (i915->display.vbt.override_afc_startup) {
3592 			hw_state->div0 = intel_de_read(i915, TGL_DPLL0_DIV0(id));
3593 			hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK;
3594 		}
3595 	} else {
3596 		if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
3597 		    id == DPLL_ID_EHL_DPLL4) {
3598 			hw_state->cfgcr0 = intel_de_read(i915,
3599 							 ICL_DPLL_CFGCR0(4));
3600 			hw_state->cfgcr1 = intel_de_read(i915,
3601 							 ICL_DPLL_CFGCR1(4));
3602 		} else {
3603 			hw_state->cfgcr0 = intel_de_read(i915,
3604 							 ICL_DPLL_CFGCR0(id));
3605 			hw_state->cfgcr1 = intel_de_read(i915,
3606 							 ICL_DPLL_CFGCR1(id));
3607 		}
3608 	}
3609 
3610 	ret = true;
3611 out:
3612 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3613 	return ret;
3614 }
3615 
3616 static bool combo_pll_get_hw_state(struct drm_i915_private *i915,
3617 				   struct intel_shared_dpll *pll,
3618 				   struct intel_dpll_hw_state *hw_state)
3619 {
3620 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
3621 
3622 	return icl_pll_get_hw_state(i915, pll, hw_state, enable_reg);
3623 }
3624 
3625 static bool tbt_pll_get_hw_state(struct drm_i915_private *i915,
3626 				 struct intel_shared_dpll *pll,
3627 				 struct intel_dpll_hw_state *hw_state)
3628 {
3629 	return icl_pll_get_hw_state(i915, pll, hw_state, TBT_PLL_ENABLE);
3630 }
3631 
3632 static void icl_dpll_write(struct drm_i915_private *i915,
3633 			   struct intel_shared_dpll *pll)
3634 {
3635 	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3636 	const enum intel_dpll_id id = pll->info->id;
3637 	i915_reg_t cfgcr0_reg, cfgcr1_reg, div0_reg = INVALID_MMIO_REG;
3638 
3639 	if (IS_ALDERLAKE_S(i915)) {
3640 		cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
3641 		cfgcr1_reg = ADLS_DPLL_CFGCR1(id);
3642 	} else if (IS_DG1(i915)) {
3643 		cfgcr0_reg = DG1_DPLL_CFGCR0(id);
3644 		cfgcr1_reg = DG1_DPLL_CFGCR1(id);
3645 	} else if (IS_ROCKETLAKE(i915)) {
3646 		cfgcr0_reg = RKL_DPLL_CFGCR0(id);
3647 		cfgcr1_reg = RKL_DPLL_CFGCR1(id);
3648 	} else if (DISPLAY_VER(i915) >= 12) {
3649 		cfgcr0_reg = TGL_DPLL_CFGCR0(id);
3650 		cfgcr1_reg = TGL_DPLL_CFGCR1(id);
3651 		div0_reg = TGL_DPLL0_DIV0(id);
3652 	} else {
3653 		if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
3654 		    id == DPLL_ID_EHL_DPLL4) {
3655 			cfgcr0_reg = ICL_DPLL_CFGCR0(4);
3656 			cfgcr1_reg = ICL_DPLL_CFGCR1(4);
3657 		} else {
3658 			cfgcr0_reg = ICL_DPLL_CFGCR0(id);
3659 			cfgcr1_reg = ICL_DPLL_CFGCR1(id);
3660 		}
3661 	}
3662 
3663 	intel_de_write(i915, cfgcr0_reg, hw_state->cfgcr0);
3664 	intel_de_write(i915, cfgcr1_reg, hw_state->cfgcr1);
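	/*
	 * div0_reg is only set in the DISPLAY_VER >= 12 branch above and
	 * stays INVALID_MMIO_REG otherwise, so warn if the VBT requests an
	 * AFC startup override on a platform that has no DIV0 register to
	 * apply it to.
	 */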
3665 	drm_WARN_ON_ONCE(&i915->drm, i915->display.vbt.override_afc_startup &&
3666 			 !i915_mmio_reg_valid(div0_reg));
3667 	if (i915->display.vbt.override_afc_startup &&
3668 	    i915_mmio_reg_valid(div0_reg))
3669 		intel_de_rmw(i915, div0_reg,
3670 			     TGL_DPLL0_DIV0_AFC_STARTUP_MASK, hw_state->div0);
3671 	intel_de_posting_read(i915, cfgcr1_reg);
3672 }
3673 
3674 static void icl_mg_pll_write(struct drm_i915_private *i915,
3675 			     struct intel_shared_dpll *pll)
3676 {
3677 	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3678 	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3679 
3680 	/*
3681 	 * Some of the following registers have reserved fields, so program
3682 	 * these with RMW based on a mask. The mask can be fixed or generated
3683 	 * during the calc/readout phase if the mask depends on some other HW
3684 	 * state like refclk, see icl_calc_mg_pll_state().
3685 	 */
3686 	intel_de_rmw(i915, MG_REFCLKIN_CTL(tc_port),
3687 		     MG_REFCLKIN_CTL_OD_2_MUX_MASK, hw_state->mg_refclkin_ctl);
3688 
3689 	intel_de_rmw(i915, MG_CLKTOP2_CORECLKCTL1(tc_port),
3690 		     MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK,
3691 		     hw_state->mg_clktop2_coreclkctl1);
3692 
3693 	intel_de_rmw(i915, MG_CLKTOP2_HSCLKCTL(tc_port),
3694 		     MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3695 		     MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3696 		     MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3697 		     MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK,
3698 		     hw_state->mg_clktop2_hsclkctl);
3699 
3700 	intel_de_write(i915, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
3701 	intel_de_write(i915, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
3702 	intel_de_write(i915, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
3703 	intel_de_write(i915, MG_PLL_FRAC_LOCK(tc_port),
3704 		       hw_state->mg_pll_frac_lock);
3705 	intel_de_write(i915, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
3706 
3707 	intel_de_rmw(i915, MG_PLL_BIAS(tc_port),
3708 		     hw_state->mg_pll_bias_mask, hw_state->mg_pll_bias);
3709 
3710 	intel_de_rmw(i915, MG_PLL_TDC_COLDST_BIAS(tc_port),
3711 		     hw_state->mg_pll_tdc_coldst_bias_mask,
3712 		     hw_state->mg_pll_tdc_coldst_bias);
3713 
3714 	intel_de_posting_read(i915, MG_PLL_TDC_COLDST_BIAS(tc_port));
3715 }
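
/*
 * A rough sketch of what each intel_de_rmw(i915, reg, clear, set) call above
 * does (the real helper also skips the write when the value is unchanged and
 * returns the old value):
 *
 *	u32 val = intel_de_read(i915, reg);
 *
 *	val &= ~clear;
 *	val |= set;
 *	intel_de_write(i915, reg, val);
 *
 * dkl_pll_write() below open-codes the same pattern with
 * intel_dkl_phy_read()/intel_dkl_phy_write(), since the DKL PHY registers are
 * reached through the HIP index register (see the comment there).
 */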
3716 
3717 static void dkl_pll_write(struct drm_i915_private *i915,
3718 			  struct intel_shared_dpll *pll)
3719 {
3720 	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3721 	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3722 	u32 val;
3723 
3724 	/*
3725 	 * All registers programmed here have the same HIP_INDEX_REG even
3726 	 * though they are on different building blocks
3727 	 */
3728 	/* All the registers are RMW */
3729 	val = intel_dkl_phy_read(i915, DKL_REFCLKIN_CTL(tc_port));
3730 	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3731 	val |= hw_state->mg_refclkin_ctl;
3732 	intel_dkl_phy_write(i915, DKL_REFCLKIN_CTL(tc_port), val);
3733 
3734 	val = intel_dkl_phy_read(i915, DKL_CLKTOP2_CORECLKCTL1(tc_port));
3735 	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3736 	val |= hw_state->mg_clktop2_coreclkctl1;
3737 	intel_dkl_phy_write(i915, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);
3738 
3739 	val = intel_dkl_phy_read(i915, DKL_CLKTOP2_HSCLKCTL(tc_port));
3740 	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3741 		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3742 		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3743 		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
3744 	val |= hw_state->mg_clktop2_hsclkctl;
3745 	intel_dkl_phy_write(i915, DKL_CLKTOP2_HSCLKCTL(tc_port), val);
3746 
3747 	val = DKL_PLL_DIV0_MASK;
3748 	if (i915->display.vbt.override_afc_startup)
3749 		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
3750 	intel_dkl_phy_rmw(i915, DKL_PLL_DIV0(tc_port), val,
3751 			  hw_state->mg_pll_div0);
3752 
3753 	val = intel_dkl_phy_read(i915, DKL_PLL_DIV1(tc_port));
3754 	val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
3755 		 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
3756 	val |= hw_state->mg_pll_div1;
3757 	intel_dkl_phy_write(i915, DKL_PLL_DIV1(tc_port), val);
3758 
3759 	val = intel_dkl_phy_read(i915, DKL_PLL_SSC(tc_port));
3760 	val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
3761 		 DKL_PLL_SSC_STEP_LEN_MASK |
3762 		 DKL_PLL_SSC_STEP_NUM_MASK |
3763 		 DKL_PLL_SSC_EN);
3764 	val |= hw_state->mg_pll_ssc;
3765 	intel_dkl_phy_write(i915, DKL_PLL_SSC(tc_port), val);
3766 
3767 	val = intel_dkl_phy_read(i915, DKL_PLL_BIAS(tc_port));
3768 	val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
3769 		 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
3770 	val |= hw_state->mg_pll_bias;
3771 	intel_dkl_phy_write(i915, DKL_PLL_BIAS(tc_port), val);
3772 
3773 	val = intel_dkl_phy_read(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3774 	val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
3775 		 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
3776 	val |= hw_state->mg_pll_tdc_coldst_bias;
3777 	intel_dkl_phy_write(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);
3778 
3779 	intel_dkl_phy_posting_read(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3780 }
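
/*
 * Each read/clear/set/write sequence above could equally use the
 * intel_dkl_phy_rmw() helper, as is already done for DKL_PLL_DIV0.
 * Sketch only:
 *
 *	intel_dkl_phy_rmw(i915, DKL_PLL_DIV1(tc_port),
 *			  DKL_PLL_DIV1_IREF_TRIM_MASK |
 *			  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK,
 *			  hw_state->mg_pll_div1);
 */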
3781 
3782 static void icl_pll_power_enable(struct drm_i915_private *i915,
3783 				 struct intel_shared_dpll *pll,
3784 				 i915_reg_t enable_reg)
3785 {
3786 	intel_de_rmw(i915, enable_reg, 0, PLL_POWER_ENABLE);
3787 
3788 	/*
3789 	 * The spec says we need to "wait" but it also says it should be
3790 	 * immediate.
3791 	 */
3792 	if (intel_de_wait_for_set(i915, enable_reg, PLL_POWER_STATE, 1))
3793 		drm_err(&i915->drm, "PLL %d Power not enabled\n",
3794 			pll->info->id);
3795 }
3796 
3797 static void icl_pll_enable(struct drm_i915_private *i915,
3798 			   struct intel_shared_dpll *pll,
3799 			   i915_reg_t enable_reg)
3800 {
3801 	intel_de_rmw(i915, enable_reg, 0, PLL_ENABLE);
3802 
3803 	/* Timeout is actually 600us. */
3804 	if (intel_de_wait_for_set(i915, enable_reg, PLL_LOCK, 1))
3805 		drm_err(&i915->drm, "PLL %d not locked\n", pll->info->id);
3806 }
3807 
3808 static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct intel_shared_dpll *pll)
3809 {
3810 	u32 val;
3811 
3812 	if (!(IS_ALDERLAKE_P(i915) && IS_DISPLAY_STEP(i915, STEP_A0, STEP_B0)) ||
3813 	    pll->info->id != DPLL_ID_ICL_DPLL0)
3814 		return;
3815 	/*
3816 	 * Wa_16011069516:adl-p[a0]
3817 	 *
3818 	 * All CMTG regs are unreliable until CMTG clock gating is disabled,
3819 	 * so we can only assume the default TRANS_CMTG_CHICKEN reg value and
3820 	 * sanity check this assumption with a double read, which presumably
3821 	 * returns the correct value even with clock gating on.
3822 	 *
3823 	 * Instead of the usual place for workarounds we apply this one here,
3824 	 * since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled.
3825 	 */
3826 	val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
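	/*
	 * intel_de_rmw() reads the register again and returns that pre-write
	 * value, so together with the read above this forms the double read
	 * mentioned in the comment.
	 */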
3827 	val = intel_de_rmw(i915, TRANS_CMTG_CHICKEN, ~0, DISABLE_DPT_CLK_GATING);
3828 	if (drm_WARN_ON(&i915->drm, val & ~DISABLE_DPT_CLK_GATING))
3829 		drm_dbg_kms(&i915->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val);
3830 }
3831 
3832 static void combo_pll_enable(struct drm_i915_private *i915,
3833 			     struct intel_shared_dpll *pll)
3834 {
3835 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
3836 
3837 	if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
3838 	    pll->info->id == DPLL_ID_EHL_DPLL4) {
3839 
3840 		/*
3841 		 * We need to disable DC states when this DPLL is enabled.
3842 		 * This can be done by taking a reference on DPLL4 power
3843 		 * domain.
3844 		 */
3845 		pll->wakeref = intel_display_power_get(i915,
3846 						       POWER_DOMAIN_DC_OFF);
3847 	}
3848 
3849 	icl_pll_power_enable(i915, pll, enable_reg);
3850 
3851 	icl_dpll_write(i915, pll);
3852 
3853 	/*
3854 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3855 	 * paths should already be setting the appropriate voltage, hence we do
3856 	 * nothing here.
3857 	 */
3858 
3859 	icl_pll_enable(i915, pll, enable_reg);
3860 
3861 	adlp_cmtg_clock_gating_wa(i915, pll);
3862 
3863 	/* DVFS post sequence would be here. See the comment above. */
3864 }
3865 
3866 static void tbt_pll_enable(struct drm_i915_private *i915,
3867 			   struct intel_shared_dpll *pll)
3868 {
3869 	icl_pll_power_enable(i915, pll, TBT_PLL_ENABLE);
3870 
3871 	icl_dpll_write(i915, pll);
3872 
3873 	/*
3874 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3875 	 * paths should already be setting the appropriate voltage, hence we do
3876 	 * nothing here.
3877 	 */
3878 
3879 	icl_pll_enable(i915, pll, TBT_PLL_ENABLE);
3880 
3881 	/* DVFS post sequence would be here. See the comment above. */
3882 }
3883 
3884 static void mg_pll_enable(struct drm_i915_private *i915,
3885 			  struct intel_shared_dpll *pll)
3886 {
3887 	i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll);
3888 
3889 	icl_pll_power_enable(i915, pll, enable_reg);
3890 
3891 	if (DISPLAY_VER(i915) >= 12)
3892 		dkl_pll_write(i915, pll);
3893 	else
3894 		icl_mg_pll_write(i915, pll);
3895 
3896 	/*
3897 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3898 	 * paths should already be setting the appropriate voltage, hence we do
3899 	 * nothing here.
3900 	 */
3901 
3902 	icl_pll_enable(i915, pll, enable_reg);
3903 
3904 	/* DVFS post sequence would be here. See the comment above. */
3905 }
3906 
3907 static void icl_pll_disable(struct drm_i915_private *i915,
3908 			    struct intel_shared_dpll *pll,
3909 			    i915_reg_t enable_reg)
3910 {
3911 	/* The first steps are done by intel_ddi_post_disable(). */
3912 
3913 	/*
3914 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3915 	 * paths should already be setting the appropriate voltage, hence we do
3916 	 * nothing here.
3917 	 */
3918 
3919 	intel_de_rmw(i915, enable_reg, PLL_ENABLE, 0);
3920 
3921 	/* Timeout is actually 1us. */
3922 	if (intel_de_wait_for_clear(i915, enable_reg, PLL_LOCK, 1))
3923 		drm_err(&i915->drm, "PLL %d locked\n", pll->info->id);
3924 
3925 	/* DVFS post sequence would be here. See the comment above. */
3926 
3927 	intel_de_rmw(i915, enable_reg, PLL_POWER_ENABLE, 0);
3928 
3929 	/*
3930 	 * The spec says we need to "wait" but it also says it should be
3931 	 * immediate.
3932 	 */
3933 	if (intel_de_wait_for_clear(i915, enable_reg, PLL_POWER_STATE, 1))
3934 		drm_err(&i915->drm, "PLL %d Power not disabled\n",
3935 			pll->info->id);
3936 }
3937 
3938 static void combo_pll_disable(struct drm_i915_private *i915,
3939 			      struct intel_shared_dpll *pll)
3940 {
3941 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
3942 
3943 	icl_pll_disable(i915, pll, enable_reg);
3944 
3945 	if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
3946 	    pll->info->id == DPLL_ID_EHL_DPLL4)
3947 		intel_display_power_put(i915, POWER_DOMAIN_DC_OFF,
3948 					pll->wakeref);
3949 }
3950 
3951 static void tbt_pll_disable(struct drm_i915_private *i915,
3952 			    struct intel_shared_dpll *pll)
3953 {
3954 	icl_pll_disable(i915, pll, TBT_PLL_ENABLE);
3955 }
3956 
3957 static void mg_pll_disable(struct drm_i915_private *i915,
3958 			   struct intel_shared_dpll *pll)
3959 {
3960 	i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll);
3961 
3962 	icl_pll_disable(i915, pll, enable_reg);
3963 }
3964 
3965 static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
3966 {
3967 	/* No SSC ref */
3968 	i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
3969 }
3970 
3971 static void icl_dump_hw_state(struct drm_i915_private *i915,
3972 			      const struct intel_dpll_hw_state *hw_state)
3973 {
3974 	drm_dbg_kms(&i915->drm,
3975 		    "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, "
3976 		    "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
3977 		    "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
3978 		    "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
3979 		    "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
3980 		    "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
3981 		    hw_state->cfgcr0, hw_state->cfgcr1,
3982 		    hw_state->div0,
3983 		    hw_state->mg_refclkin_ctl,
3984 		    hw_state->mg_clktop2_coreclkctl1,
3985 		    hw_state->mg_clktop2_hsclkctl,
3986 		    hw_state->mg_pll_div0,
3987 		    hw_state->mg_pll_div1,
3988 		    hw_state->mg_pll_lf,
3989 		    hw_state->mg_pll_frac_lock,
3990 		    hw_state->mg_pll_ssc,
3991 		    hw_state->mg_pll_bias,
3992 		    hw_state->mg_pll_tdc_coldst_bias);
3993 }
3994 
3995 static const struct intel_shared_dpll_funcs combo_pll_funcs = {
3996 	.enable = combo_pll_enable,
3997 	.disable = combo_pll_disable,
3998 	.get_hw_state = combo_pll_get_hw_state,
3999 	.get_freq = icl_ddi_combo_pll_get_freq,
4000 };
4001 
4002 static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
4003 	.enable = tbt_pll_enable,
4004 	.disable = tbt_pll_disable,
4005 	.get_hw_state = tbt_pll_get_hw_state,
4006 	.get_freq = icl_ddi_tbt_pll_get_freq,
4007 };
4008 
4009 static const struct intel_shared_dpll_funcs mg_pll_funcs = {
4010 	.enable = mg_pll_enable,
4011 	.disable = mg_pll_disable,
4012 	.get_hw_state = mg_pll_get_hw_state,
4013 	.get_freq = icl_ddi_mg_pll_get_freq,
4014 };
4015 
4016 static const struct dpll_info icl_plls[] = {
4017 	{ "DPLL 0",   &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
4018 	{ "DPLL 1",   &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
4019 	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
4020 	{ "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
4021 	{ "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
4022 	{ "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
4023 	{ "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
4024 	{ },
4025 };
4026 
4027 static const struct intel_dpll_mgr icl_pll_mgr = {
4028 	.dpll_info = icl_plls,
4029 	.compute_dplls = icl_compute_dplls,
4030 	.get_dplls = icl_get_dplls,
4031 	.put_dplls = icl_put_dplls,
4032 	.update_active_dpll = icl_update_active_dpll,
4033 	.update_ref_clks = icl_update_dpll_ref_clks,
4034 	.dump_hw_state = icl_dump_hw_state,
4035 };
4036 
4037 static const struct dpll_info ehl_plls[] = {
4038 	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
4039 	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
4040 	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
4041 	{ },
4042 };
4043 
4044 static const struct intel_dpll_mgr ehl_pll_mgr = {
4045 	.dpll_info = ehl_plls,
4046 	.compute_dplls = icl_compute_dplls,
4047 	.get_dplls = icl_get_dplls,
4048 	.put_dplls = icl_put_dplls,
4049 	.update_ref_clks = icl_update_dpll_ref_clks,
4050 	.dump_hw_state = icl_dump_hw_state,
4051 };
4052 
4053 static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
4054 	.enable = mg_pll_enable,
4055 	.disable = mg_pll_disable,
4056 	.get_hw_state = dkl_pll_get_hw_state,
4057 	.get_freq = icl_ddi_mg_pll_get_freq,
4058 };
4059 
4060 static const struct dpll_info tgl_plls[] = {
4061 	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
4062 	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
4063 	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
4064 	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
4065 	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
4066 	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
4067 	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
4068 	{ "TC PLL 5", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL5, 0 },
4069 	{ "TC PLL 6", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL6, 0 },
4070 	{ },
4071 };
4072 
4073 static const struct intel_dpll_mgr tgl_pll_mgr = {
4074 	.dpll_info = tgl_plls,
4075 	.compute_dplls = icl_compute_dplls,
4076 	.get_dplls = icl_get_dplls,
4077 	.put_dplls = icl_put_dplls,
4078 	.update_active_dpll = icl_update_active_dpll,
4079 	.update_ref_clks = icl_update_dpll_ref_clks,
4080 	.dump_hw_state = icl_dump_hw_state,
4081 };
4082 
4083 static const struct dpll_info rkl_plls[] = {
4084 	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
4085 	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
4086 	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
4087 	{ },
4088 };
4089 
4090 static const struct intel_dpll_mgr rkl_pll_mgr = {
4091 	.dpll_info = rkl_plls,
4092 	.compute_dplls = icl_compute_dplls,
4093 	.get_dplls = icl_get_dplls,
4094 	.put_dplls = icl_put_dplls,
4095 	.update_ref_clks = icl_update_dpll_ref_clks,
4096 	.dump_hw_state = icl_dump_hw_state,
4097 };
4098 
4099 static const struct dpll_info dg1_plls[] = {
4100 	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_DG1_DPLL0, 0 },
4101 	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_DG1_DPLL1, 0 },
4102 	{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
4103 	{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
4104 	{ },
4105 };
4106 
4107 static const struct intel_dpll_mgr dg1_pll_mgr = {
4108 	.dpll_info = dg1_plls,
4109 	.compute_dplls = icl_compute_dplls,
4110 	.get_dplls = icl_get_dplls,
4111 	.put_dplls = icl_put_dplls,
4112 	.update_ref_clks = icl_update_dpll_ref_clks,
4113 	.dump_hw_state = icl_dump_hw_state,
4114 };
4115 
4116 static const struct dpll_info adls_plls[] = {
4117 	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
4118 	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
4119 	{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
4120 	{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
4121 	{ },
4122 };
4123 
4124 static const struct intel_dpll_mgr adls_pll_mgr = {
4125 	.dpll_info = adls_plls,
4126 	.compute_dplls = icl_compute_dplls,
4127 	.get_dplls = icl_get_dplls,
4128 	.put_dplls = icl_put_dplls,
4129 	.update_ref_clks = icl_update_dpll_ref_clks,
4130 	.dump_hw_state = icl_dump_hw_state,
4131 };
4132 
4133 static const struct dpll_info adlp_plls[] = {
4134 	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
4135 	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
4136 	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
4137 	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
4138 	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
4139 	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
4140 	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
4141 	{ },
4142 };
4143 
4144 static const struct intel_dpll_mgr adlp_pll_mgr = {
4145 	.dpll_info = adlp_plls,
4146 	.compute_dplls = icl_compute_dplls,
4147 	.get_dplls = icl_get_dplls,
4148 	.put_dplls = icl_put_dplls,
4149 	.update_active_dpll = icl_update_active_dpll,
4150 	.update_ref_clks = icl_update_dpll_ref_clks,
4151 	.dump_hw_state = icl_dump_hw_state,
4152 };
4153 
4154 /**
4155  * intel_shared_dpll_init - Initialize shared DPLLs
4156  * @i915: i915 device
4157  *
4158  * Initialize shared DPLLs for @i915.
4159  */
4160 void intel_shared_dpll_init(struct drm_i915_private *i915)
4161 {
4162 	const struct intel_dpll_mgr *dpll_mgr = NULL;
4163 	const struct dpll_info *dpll_info;
4164 	int i;
4165 
4166 	mutex_init(&i915->display.dpll.lock);
4167 
4168 	if (DISPLAY_VER(i915) >= 14 || IS_DG2(i915))
4169 		/* No shared DPLLs on DG2 and display ver 14+; port PLLs are part of the PHY */
4170 		dpll_mgr = NULL;
4171 	else if (IS_ALDERLAKE_P(i915))
4172 		dpll_mgr = &adlp_pll_mgr;
4173 	else if (IS_ALDERLAKE_S(i915))
4174 		dpll_mgr = &adls_pll_mgr;
4175 	else if (IS_DG1(i915))
4176 		dpll_mgr = &dg1_pll_mgr;
4177 	else if (IS_ROCKETLAKE(i915))
4178 		dpll_mgr = &rkl_pll_mgr;
4179 	else if (DISPLAY_VER(i915) >= 12)
4180 		dpll_mgr = &tgl_pll_mgr;
4181 	else if (IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915))
4182 		dpll_mgr = &ehl_pll_mgr;
4183 	else if (DISPLAY_VER(i915) >= 11)
4184 		dpll_mgr = &icl_pll_mgr;
4185 	else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
4186 		dpll_mgr = &bxt_pll_mgr;
4187 	else if (DISPLAY_VER(i915) == 9)
4188 		dpll_mgr = &skl_pll_mgr;
4189 	else if (HAS_DDI(i915))
4190 		dpll_mgr = &hsw_pll_mgr;
4191 	else if (HAS_PCH_IBX(i915) || HAS_PCH_CPT(i915))
4192 		dpll_mgr = &pch_pll_mgr;
4193 
4194 	if (!dpll_mgr)
4195 		return;
4196 
4197 	dpll_info = dpll_mgr->dpll_info;
4198 
4199 	for (i = 0; dpll_info[i].name; i++) {
4200 		if (drm_WARN_ON(&i915->drm,
4201 				i >= ARRAY_SIZE(i915->display.dpll.shared_dplls)))
4202 			break;
4203 
4204 		/* must fit into unsigned long bitmask on 32bit */
4205 		if (drm_WARN_ON(&i915->drm, dpll_info[i].id >= 32))
4206 			break;
4207 
4208 		i915->display.dpll.shared_dplls[i].info = &dpll_info[i];
4209 		i915->display.dpll.shared_dplls[i].index = i;
4210 	}
4211 
4212 	i915->display.dpll.mgr = dpll_mgr;
4213 	i915->display.dpll.num_shared_dpll = i;
4214 }
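
/*
 * Example of the wiring performed above (a sketch derived from the
 * adlp_plls[] table): on ADL-P the loop registers seven shared DPLLs
 * ("DPLL 0", "DPLL 1", "TBT PLL", "TC PLL 1".."TC PLL 4"), leaving
 * num_shared_dpll == 7 with each shared_dplls[i].info pointing at the
 * corresponding adlp_plls[i] entry.
 */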
4215 
4216 /**
4217  * intel_compute_shared_dplls - compute DPLL state for a CRTC and encoder combination
4218  * @state: atomic state
4219  * @crtc: CRTC to compute DPLLs for
4220  * @encoder: encoder
4221  *
4222  * This function computes the DPLL state for the given CRTC and encoder.
4223  *
4224  * The new configuration in the atomic commit @state is made effective by
4225  * calling intel_shared_dpll_swap_state().
4226  *
4227  * Returns:
4228  * 0 on success, negative error code on failure.
4229  */
4230 int intel_compute_shared_dplls(struct intel_atomic_state *state,
4231 			       struct intel_crtc *crtc,
4232 			       struct intel_encoder *encoder)
4233 {
4234 	struct drm_i915_private *i915 = to_i915(state->base.dev);
4235 	const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4236 
4237 	if (drm_WARN_ON(&i915->drm, !dpll_mgr))
4238 		return -EINVAL;
4239 
4240 	return dpll_mgr->compute_dplls(state, crtc, encoder);
4241 }
4242 
4243 /**
4244  * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
4245  * @state: atomic state
4246  * @crtc: CRTC to reserve DPLLs for
4247  * @encoder: encoder
4248  *
4249  * This function reserves all required DPLLs for the given CRTC and encoder
4250  * combination in the current atomic commit @state and the new @crtc atomic
4251  * state.
4252  *
4253  * The new configuration in the atomic commit @state is made effective by
4254  * calling intel_shared_dpll_swap_state().
4255  *
4256  * The reserved DPLLs should be released by calling
4257  * intel_release_shared_dplls().
4258  *
4259  * Returns:
4260  * 0 if all required DPLLs were successfully reserved,
4261  * negative error code otherwise.
4262  */
4263 int intel_reserve_shared_dplls(struct intel_atomic_state *state,
4264 			       struct intel_crtc *crtc,
4265 			       struct intel_encoder *encoder)
4266 {
4267 	struct drm_i915_private *i915 = to_i915(state->base.dev);
4268 	const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4269 
4270 	if (drm_WARN_ON(&i915->drm, !dpll_mgr))
4271 		return -EINVAL;
4272 
4273 	return dpll_mgr->get_dplls(state, crtc, encoder);
4274 }
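
/*
 * A minimal usage sketch of the reserve/release flow (the caller shown is
 * hypothetical; the dpll calls are the real API described above):
 *
 *	// atomic check phase
 *	ret = intel_compute_shared_dplls(state, crtc, encoder);
 *	if (ret)
 *		return ret;
 *
 *	ret = intel_reserve_shared_dplls(state, crtc, encoder);
 *	if (ret)
 *		return ret;
 *
 *	// atomic commit phase
 *	intel_shared_dpll_swap_state(state);
 *
 *	// once the CRTC no longer needs its DPLLs
 *	intel_release_shared_dplls(state, crtc);
 */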
4275 
4276 /**
4277  * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
4278  * @state: atomic state
4279  * @crtc: crtc from which the DPLLs are to be released
4280  *
4281  * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
4282  * from the current atomic commit @state and the old @crtc atomic state.
4283  *
4284  * The new configuration in the atomic commit @state is made effective by
4285  * calling intel_shared_dpll_swap_state().
4286  */
4287 void intel_release_shared_dplls(struct intel_atomic_state *state,
4288 				struct intel_crtc *crtc)
4289 {
4290 	struct drm_i915_private *i915 = to_i915(state->base.dev);
4291 	const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4292 
4293 	/*
4294 	 * FIXME: this function is called for every platform having a
4295 	 * compute_clock hook, even though the platform doesn't yet support
4296 	 * the shared DPLL framework and intel_reserve_shared_dplls() is not
4297 	 * called on those.
4298 	 */
4299 	if (!dpll_mgr)
4300 		return;
4301 
4302 	dpll_mgr->put_dplls(state, crtc);
4303 }
4304 
4305 /**
4306  * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
4307  * @state: atomic state
4308  * @crtc: the CRTC for which to update the active DPLL
4309  * @encoder: encoder determining the type of port DPLL
4310  *
4311  * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
4312  * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
4313  * DPLL selected will be based on the current mode of the encoder's port.
4314  */
4315 void intel_update_active_dpll(struct intel_atomic_state *state,
4316 			      struct intel_crtc *crtc,
4317 			      struct intel_encoder *encoder)
4318 {
4319 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
4320 	const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4321 
4322 	if (drm_WARN_ON(&i915->drm, !dpll_mgr))
4323 		return;
4324 
4325 	dpll_mgr->update_active_dpll(state, crtc, encoder);
4326 }
4327 
4328 /**
4329  * intel_dpll_get_freq - calculate the DPLL's output frequency
4330  * @i915: i915 device
4331  * @pll: DPLL for which to calculate the output frequency
4332  * @pll_state: DPLL state from which to calculate the output frequency
4333  *
4334  * Return the output frequency corresponding to @pll's passed in @pll_state.
4335  */
4336 int intel_dpll_get_freq(struct drm_i915_private *i915,
4337 			const struct intel_shared_dpll *pll,
4338 			const struct intel_dpll_hw_state *pll_state)
4339 {
4340 	if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
4341 		return 0;
4342 
4343 	return pll->info->funcs->get_freq(i915, pll, pll_state);
4344 }
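
/*
 * Usage sketch (hypothetical caller): query the frequency a PLL is currently
 * programmed to from its software-tracked state:
 *
 *	int clock = intel_dpll_get_freq(i915, pll, &pll->state.hw_state);
 *
 * A return value of 0 means the platform hooks provide no get_freq()
 * implementation.
 */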
4345 
4346 /**
4347  * intel_dpll_get_hw_state - readout the DPLL's hardware state
4348  * @i915: i915 device
4349  * @pll: DPLL for which to read out the hardware state
4350  * @hw_state: DPLL's hardware state
4351  *
4352  * Read out @pll's hardware state into @hw_state.
4353  */
4354 bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
4355 			     struct intel_shared_dpll *pll,
4356 			     struct intel_dpll_hw_state *hw_state)
4357 {
4358 	return pll->info->funcs->get_hw_state(i915, pll, hw_state);
4359 }
4360 
4361 static void readout_dpll_hw_state(struct drm_i915_private *i915,
4362 				  struct intel_shared_dpll *pll)
4363 {
4364 	struct intel_crtc *crtc;
4365 
4366 	pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state);
4367 
4368 	if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
4369 	    pll->on &&
4370 	    pll->info->id == DPLL_ID_EHL_DPLL4) {
4371 		pll->wakeref = intel_display_power_get(i915,
4372 						       POWER_DOMAIN_DC_OFF);
4373 	}
4374 
4375 	pll->state.pipe_mask = 0;
4376 	for_each_intel_crtc(&i915->drm, crtc) {
4377 		struct intel_crtc_state *crtc_state =
4378 			to_intel_crtc_state(crtc->base.state);
4379 
4380 		if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
4381 			intel_reference_shared_dpll_crtc(crtc, pll, &pll->state);
4382 	}
4383 	pll->active_mask = pll->state.pipe_mask;
4384 
4385 	drm_dbg_kms(&i915->drm,
4386 		    "%s hw state readout: pipe_mask 0x%x, on %i\n",
4387 		    pll->info->name, pll->state.pipe_mask, pll->on);
4388 }
4389 
4390 void intel_dpll_update_ref_clks(struct drm_i915_private *i915)
4391 {
4392 	if (i915->display.dpll.mgr && i915->display.dpll.mgr->update_ref_clks)
4393 		i915->display.dpll.mgr->update_ref_clks(i915);
4394 }
4395 
4396 void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
4397 {
4398 	struct intel_shared_dpll *pll;
4399 	int i;
4400 
4401 	for_each_shared_dpll(i915, pll, i)
4402 		readout_dpll_hw_state(i915, pll);
4403 }
4404 
4405 static void sanitize_dpll_state(struct drm_i915_private *i915,
4406 				struct intel_shared_dpll *pll)
4407 {
4408 	if (!pll->on)
4409 		return;
4410 
4411 	adlp_cmtg_clock_gating_wa(i915, pll);
4412 
4413 	if (pll->active_mask)
4414 		return;
4415 
4416 	drm_dbg_kms(&i915->drm,
4417 		    "%s enabled but not in use, disabling\n",
4418 		    pll->info->name);
4419 
4420 	pll->info->funcs->disable(i915, pll);
4421 	pll->on = false;
4422 }
4423 
4424 void intel_dpll_sanitize_state(struct drm_i915_private *i915)
4425 {
4426 	struct intel_shared_dpll *pll;
4427 	int i;
4428 
4429 	for_each_shared_dpll(i915, pll, i)
4430 		sanitize_dpll_state(i915, pll);
4431 }
4432 
4433 /**
4434  * intel_dpll_dump_hw_state - write hw_state to dmesg
4435  * @i915: i915 drm device
4436  * @hw_state: hw state to be written to the log
4437  *
4438  * Write the relevant values in @hw_state to dmesg using drm_dbg_kms.
4439  */
4440 void intel_dpll_dump_hw_state(struct drm_i915_private *i915,
4441 			      const struct intel_dpll_hw_state *hw_state)
4442 {
4443 	if (i915->display.dpll.mgr) {
4444 		i915->display.dpll.mgr->dump_hw_state(i915, hw_state);
4445 	} else {
4446 		/* fallback for platforms that don't use the shared dpll
4447 		 * infrastructure
4448 		 */
4449 		drm_dbg_kms(&i915->drm,
4450 			    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
4451 			    "fp0: 0x%x, fp1: 0x%x\n",
4452 			    hw_state->dpll,
4453 			    hw_state->dpll_md,
4454 			    hw_state->fp0,
4455 			    hw_state->fp1);
4456 	}
4457 }
4458 
4459 static void
4460 verify_single_dpll_state(struct drm_i915_private *i915,
4461 			 struct intel_shared_dpll *pll,
4462 			 struct intel_crtc *crtc,
4463 			 const struct intel_crtc_state *new_crtc_state)
4464 {
4465 	struct intel_dpll_hw_state dpll_hw_state;
4466 	u8 pipe_mask;
4467 	bool active;
4468 
4469 	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
4470 
4471 	drm_dbg_kms(&i915->drm, "%s\n", pll->info->name);
4472 
4473 	active = intel_dpll_get_hw_state(i915, pll, &dpll_hw_state);
4474 
4475 	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
4476 		I915_STATE_WARN(i915, !pll->on && pll->active_mask,
4477 				"pll in active use but not on in sw tracking\n");
4478 		I915_STATE_WARN(i915, pll->on && !pll->active_mask,
4479 				"pll is on but not used by any active pipe\n");
4480 		I915_STATE_WARN(i915, pll->on != active,
4481 				"pll on state mismatch (expected %i, found %i)\n",
4482 				pll->on, active);
4483 	}
4484 
4485 	if (!crtc) {
4486 		I915_STATE_WARN(i915,
4487 				pll->active_mask & ~pll->state.pipe_mask,
4488 				"more active pll users than references: 0x%x vs 0x%x\n",
4489 				pll->active_mask, pll->state.pipe_mask);
4490 
4491 		return;
4492 	}
4493 
4494 	pipe_mask = BIT(crtc->pipe);
4495 
4496 	if (new_crtc_state->hw.active)
4497 		I915_STATE_WARN(i915, !(pll->active_mask & pipe_mask),
4498 				"pll active mismatch (expected pipe %c in active mask 0x%x)\n",
4499 				pipe_name(crtc->pipe), pll->active_mask);
4500 	else
4501 		I915_STATE_WARN(i915, pll->active_mask & pipe_mask,
4502 				"pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
4503 				pipe_name(crtc->pipe), pll->active_mask);
4504 
4505 	I915_STATE_WARN(i915, !(pll->state.pipe_mask & pipe_mask),
4506 			"pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
4507 			pipe_mask, pll->state.pipe_mask);
4508 
4509 	I915_STATE_WARN(i915,
4510 			pll->on && memcmp(&pll->state.hw_state, &dpll_hw_state,
4511 					  sizeof(dpll_hw_state)),
4512 			"pll hw state mismatch\n");
4513 }
4514 
4515 void intel_shared_dpll_state_verify(struct intel_atomic_state *state,
4516 				    struct intel_crtc *crtc)
4517 {
4518 	struct drm_i915_private *i915 = to_i915(state->base.dev);
4519 	const struct intel_crtc_state *old_crtc_state =
4520 		intel_atomic_get_old_crtc_state(state, crtc);
4521 	const struct intel_crtc_state *new_crtc_state =
4522 		intel_atomic_get_new_crtc_state(state, crtc);
4523 
4524 	if (new_crtc_state->shared_dpll)
4525 		verify_single_dpll_state(i915, new_crtc_state->shared_dpll,
4526 					 crtc, new_crtc_state);
4527 
4528 	if (old_crtc_state->shared_dpll &&
4529 	    old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
4530 		u8 pipe_mask = BIT(crtc->pipe);
4531 		struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
4532 
4533 		I915_STATE_WARN(i915, pll->active_mask & pipe_mask,
4534 				"pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
4535 				pipe_name(crtc->pipe), pll->active_mask);
4536 		I915_STATE_WARN(i915, pll->state.pipe_mask & pipe_mask,
4537 				"pll enabled crtcs mismatch (found %x in enabled mask (0x%x))\n",
4538 				pipe_name(crtc->pipe), pll->state.pipe_mask);
4539 	}
4540 }
4541 
4542 void intel_shared_dpll_verify_disabled(struct intel_atomic_state *state)
4543 {
4544 	struct drm_i915_private *i915 = to_i915(state->base.dev);
4545 	struct intel_shared_dpll *pll;
4546 	int i;
4547 
4548 	for_each_shared_dpll(i915, pll, i)
4549 		verify_single_dpll_state(i915, pll, NULL, NULL);
4550 }
4551