1 /*
2  * Copyright © 2006-2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <linux/math.h>
25 #include <linux/string_helpers.h>
26 
27 #include "bxt_dpio_phy_regs.h"
28 #include "i915_drv.h"
29 #include "i915_reg.h"
30 #include "intel_de.h"
31 #include "intel_display_types.h"
32 #include "intel_dkl_phy.h"
33 #include "intel_dkl_phy_regs.h"
34 #include "intel_dpio_phy.h"
35 #include "intel_dpll.h"
36 #include "intel_dpll_mgr.h"
37 #include "intel_hti.h"
38 #include "intel_mg_phy_regs.h"
39 #include "intel_pch_refclk.h"
40 #include "intel_tc.h"
41 
42 /**
43  * DOC: Display PLLs
44  *
45  * Display PLLs used for driving outputs vary by platform. While some have
46  * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
47  * from a pool. In the latter scenario, it is possible that multiple pipes
48  * share a PLL if their configurations match.
49  *
50  * This file provides an abstraction over display PLLs. The function
51  * intel_shared_dpll_init() initializes the PLLs for the given platform.  The
52  * users of a PLL are tracked and that tracking is integrated with the atomic
53  * modeset interface. During an atomic operation, required PLLs can be reserved
54  * for a given CRTC and encoder configuration by calling
55  * intel_reserve_shared_dplls() and previously reserved PLLs can be released
56  * with intel_release_shared_dplls().
57  * Changes to the users are first staged in the atomic state, and then made
58  * effective by calling intel_shared_dpll_swap_state() during the atomic
59  * commit phase.
60  */
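/*
 * Rough sketch of the intended call flow (illustrative only, argument
 * lists abbreviated), assuming the usual atomic check/commit split:
 *
 *   check phase:   intel_reserve_shared_dplls(state, crtc, encoder);
 *                  (or intel_release_shared_dplls(state, crtc) once the
 *                   CRTC no longer needs its PLL)
 *   commit phase:  intel_shared_dpll_swap_state(state);
 *                  intel_enable_shared_dpll(crtc_state);
 *                  ...
 *                  intel_disable_shared_dpll(crtc_state);
 */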
61 
62 /* platform specific hooks for managing DPLLs */
63 struct intel_shared_dpll_funcs {
64 	/*
65 	 * Hook for enabling the pll, called from intel_enable_shared_dpll() if
66 	 * the pll is not already enabled.
67 	 */
68 	void (*enable)(struct drm_i915_private *i915,
69 		       struct intel_shared_dpll *pll,
70 		       const struct intel_dpll_hw_state *dpll_hw_state);
71 
72 	/*
73 	 * Hook for disabling the pll, called from intel_disable_shared_dpll()
74 	 * only when it is safe to disable the pll, i.e., there are no more
75 	 * tracked users for it.
76 	 */
77 	void (*disable)(struct drm_i915_private *i915,
78 			struct intel_shared_dpll *pll);
79 
80 	/*
81 	 * Hook for reading the values currently programmed to the DPLL
82 	 * registers. This is used for initial hw state readout and state
83 	 * verification after a mode set.
84 	 */
85 	bool (*get_hw_state)(struct drm_i915_private *i915,
86 			     struct intel_shared_dpll *pll,
87 			     struct intel_dpll_hw_state *dpll_hw_state);
88 
89 	/*
90 	 * Hook for calculating the pll's output frequency based on its passed
91 	 * in state.
92 	 */
93 	int (*get_freq)(struct drm_i915_private *i915,
94 			const struct intel_shared_dpll *pll,
95 			const struct intel_dpll_hw_state *dpll_hw_state);
96 };
97 
98 struct intel_dpll_mgr {
99 	const struct dpll_info *dpll_info;
100 
101 	int (*compute_dplls)(struct intel_atomic_state *state,
102 			     struct intel_crtc *crtc,
103 			     struct intel_encoder *encoder);
104 	int (*get_dplls)(struct intel_atomic_state *state,
105 			 struct intel_crtc *crtc,
106 			 struct intel_encoder *encoder);
107 	void (*put_dplls)(struct intel_atomic_state *state,
108 			  struct intel_crtc *crtc);
109 	void (*update_active_dpll)(struct intel_atomic_state *state,
110 				   struct intel_crtc *crtc,
111 				   struct intel_encoder *encoder);
112 	void (*update_ref_clks)(struct drm_i915_private *i915);
113 	void (*dump_hw_state)(struct drm_printer *p,
114 			      const struct intel_dpll_hw_state *dpll_hw_state);
115 	bool (*compare_hw_state)(const struct intel_dpll_hw_state *a,
116 				 const struct intel_dpll_hw_state *b);
117 };
118 
119 static void
120 intel_atomic_duplicate_dpll_state(struct drm_i915_private *i915,
121 				  struct intel_shared_dpll_state *shared_dpll)
122 {
123 	struct intel_shared_dpll *pll;
124 	int i;
125 
126 	/* Copy shared dpll state */
127 	for_each_shared_dpll(i915, pll, i)
128 		shared_dpll[pll->index] = pll->state;
129 }
130 
131 static struct intel_shared_dpll_state *
132 intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
133 {
134 	struct intel_atomic_state *state = to_intel_atomic_state(s);
135 
136 	drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
137 
138 	if (!state->dpll_set) {
139 		state->dpll_set = true;
140 
141 		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
142 						  state->shared_dpll);
143 	}
144 
145 	return state->shared_dpll;
146 }
147 
148 /**
149  * intel_get_shared_dpll_by_id - get a DPLL given its id
150  * @i915: i915 device instance
151  * @id: pll id
152  *
153  * Returns:
154  * A pointer to the DPLL with @id, or NULL if no DPLL with that id exists.
155  */
156 struct intel_shared_dpll *
157 intel_get_shared_dpll_by_id(struct drm_i915_private *i915,
158 			    enum intel_dpll_id id)
159 {
160 	struct intel_shared_dpll *pll;
161 	int i;
162 
163 	for_each_shared_dpll(i915, pll, i) {
164 		if (pll->info->id == id)
165 			return pll;
166 	}
167 
168 	MISSING_CASE(id);
169 	return NULL;
170 }
171 
172 /* For ILK+ */
173 void assert_shared_dpll(struct drm_i915_private *i915,
174 			struct intel_shared_dpll *pll,
175 			bool state)
176 {
177 	struct intel_display *display = &i915->display;
178 	bool cur_state;
179 	struct intel_dpll_hw_state hw_state;
180 
181 	if (drm_WARN(display->drm, !pll,
182 		     "asserting DPLL %s with no DPLL\n", str_on_off(state)))
183 		return;
184 
185 	cur_state = intel_dpll_get_hw_state(i915, pll, &hw_state);
186 	INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
187 				 "%s assertion failure (expected %s, current %s)\n",
188 				 pll->info->name, str_on_off(state),
189 				 str_on_off(cur_state));
190 }
191 
192 static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
193 {
194 	return TC_PORT_1 + id - DPLL_ID_ICL_MGPLL1;
195 }
196 
197 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
198 {
199 	return tc_port - TC_PORT_1 + DPLL_ID_ICL_MGPLL1;
200 }
201 
202 static i915_reg_t
203 intel_combo_pll_enable_reg(struct drm_i915_private *i915,
204 			   struct intel_shared_dpll *pll)
205 {
206 	if (IS_DG1(i915))
207 		return DG1_DPLL_ENABLE(pll->info->id);
208 	else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
209 		 (pll->info->id == DPLL_ID_EHL_DPLL4))
210 		return MG_PLL_ENABLE(0);
211 
212 	return ICL_DPLL_ENABLE(pll->info->id);
213 }
214 
215 static i915_reg_t
216 intel_tc_pll_enable_reg(struct drm_i915_private *i915,
217 			struct intel_shared_dpll *pll)
218 {
219 	const enum intel_dpll_id id = pll->info->id;
220 	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
221 
222 	if (IS_ALDERLAKE_P(i915))
223 		return ADLP_PORTTC_PLL_ENABLE(tc_port);
224 
225 	return MG_PLL_ENABLE(tc_port);
226 }
227 
228 static void _intel_enable_shared_dpll(struct drm_i915_private *i915,
229 				      struct intel_shared_dpll *pll)
230 {
231 	if (pll->info->power_domain)
232 		pll->wakeref = intel_display_power_get(i915, pll->info->power_domain);
233 
234 	pll->info->funcs->enable(i915, pll, &pll->state.hw_state);
235 	pll->on = true;
236 }
237 
238 static void _intel_disable_shared_dpll(struct drm_i915_private *i915,
239 				       struct intel_shared_dpll *pll)
240 {
241 	pll->info->funcs->disable(i915, pll);
242 	pll->on = false;
243 
244 	if (pll->info->power_domain)
245 		intel_display_power_put(i915, pll->info->power_domain, pll->wakeref);
246 }
247 
248 /**
249  * intel_enable_shared_dpll - enable a CRTC's shared DPLL
250  * @crtc_state: CRTC, and its state, which has a shared DPLL
251  *
252  * Enable the shared DPLL used by the CRTC in @crtc_state.
253  */
254 void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
255 {
256 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
257 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
258 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
259 	unsigned int pipe_mask = BIT(crtc->pipe);
260 	unsigned int old_mask;
261 
262 	if (drm_WARN_ON(&i915->drm, pll == NULL))
263 		return;
264 
265 	mutex_lock(&i915->display.dpll.lock);
266 	old_mask = pll->active_mask;
267 
268 	if (drm_WARN_ON(&i915->drm, !(pll->state.pipe_mask & pipe_mask)) ||
269 	    drm_WARN_ON(&i915->drm, pll->active_mask & pipe_mask))
270 		goto out;
271 
272 	pll->active_mask |= pipe_mask;
273 
274 	drm_dbg_kms(&i915->drm,
275 		    "enable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
276 		    pll->info->name, pll->active_mask, pll->on,
277 		    crtc->base.base.id, crtc->base.name);
278 
279 	if (old_mask) {
280 		drm_WARN_ON(&i915->drm, !pll->on);
281 		assert_shared_dpll_enabled(i915, pll);
282 		goto out;
283 	}
284 	drm_WARN_ON(&i915->drm, pll->on);
285 
286 	drm_dbg_kms(&i915->drm, "enabling %s\n", pll->info->name);
287 
288 	_intel_enable_shared_dpll(i915, pll);
289 
290 out:
291 	mutex_unlock(&i915->display.dpll.lock);
292 }
293 
294 /**
295  * intel_disable_shared_dpll - disable a CRTC's shared DPLL
296  * @crtc_state: CRTC, and its state, which has a shared DPLL
297  *
298  * Disable the shared DPLL used by the CRTC in @crtc_state.
299  */
300 void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
301 {
302 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
303 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
304 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
305 	unsigned int pipe_mask = BIT(crtc->pipe);
306 
307 	/* PCH only available on ILK+ */
308 	if (DISPLAY_VER(i915) < 5)
309 		return;
310 
311 	if (pll == NULL)
312 		return;
313 
314 	mutex_lock(&i915->display.dpll.lock);
315 	if (drm_WARN(&i915->drm, !(pll->active_mask & pipe_mask),
316 		     "%s not used by [CRTC:%d:%s]\n", pll->info->name,
317 		     crtc->base.base.id, crtc->base.name))
318 		goto out;
319 
320 	drm_dbg_kms(&i915->drm,
321 		    "disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
322 		    pll->info->name, pll->active_mask, pll->on,
323 		    crtc->base.base.id, crtc->base.name);
324 
325 	assert_shared_dpll_enabled(i915, pll);
326 	drm_WARN_ON(&i915->drm, !pll->on);
327 
328 	pll->active_mask &= ~pipe_mask;
329 	if (pll->active_mask)
330 		goto out;
331 
332 	drm_dbg_kms(&i915->drm, "disabling %s\n", pll->info->name);
333 
334 	_intel_disable_shared_dpll(i915, pll);
335 
336 out:
337 	mutex_unlock(&i915->display.dpll.lock);
338 }
339 
340 static unsigned long
341 intel_dpll_mask_all(struct drm_i915_private *i915)
342 {
343 	struct intel_shared_dpll *pll;
344 	unsigned long dpll_mask = 0;
345 	int i;
346 
347 	for_each_shared_dpll(i915, pll, i) {
348 		drm_WARN_ON(&i915->drm, dpll_mask & BIT(pll->info->id));
349 
350 		dpll_mask |= BIT(pll->info->id);
351 	}
352 
353 	return dpll_mask;
354 }
355 
356 static struct intel_shared_dpll *
357 intel_find_shared_dpll(struct intel_atomic_state *state,
358 		       const struct intel_crtc *crtc,
359 		       const struct intel_dpll_hw_state *dpll_hw_state,
360 		       unsigned long dpll_mask)
361 {
362 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
363 	unsigned long dpll_mask_all = intel_dpll_mask_all(i915);
364 	struct intel_shared_dpll_state *shared_dpll;
365 	struct intel_shared_dpll *unused_pll = NULL;
366 	enum intel_dpll_id id;
367 
368 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
369 
370 	drm_WARN_ON(&i915->drm, dpll_mask & ~dpll_mask_all);
371 
372 	for_each_set_bit(id, &dpll_mask, fls(dpll_mask_all)) {
373 		struct intel_shared_dpll *pll;
374 
375 		pll = intel_get_shared_dpll_by_id(i915, id);
376 		if (!pll)
377 			continue;
378 
379 		/* Only want to check enabled timings first */
380 		if (shared_dpll[pll->index].pipe_mask == 0) {
381 			if (!unused_pll)
382 				unused_pll = pll;
383 			continue;
384 		}
385 
386 		if (memcmp(dpll_hw_state,
387 			   &shared_dpll[pll->index].hw_state,
388 			   sizeof(*dpll_hw_state)) == 0) {
389 			drm_dbg_kms(&i915->drm,
390 				    "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
391 				    crtc->base.base.id, crtc->base.name,
392 				    pll->info->name,
393 				    shared_dpll[pll->index].pipe_mask,
394 				    pll->active_mask);
395 			return pll;
396 		}
397 	}
398 
399 	/* Ok no matching timings, maybe there's a free one? */
400 	if (unused_pll) {
401 		drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] allocated %s\n",
402 			    crtc->base.base.id, crtc->base.name,
403 			    unused_pll->info->name);
404 		return unused_pll;
405 	}
406 
407 	return NULL;
408 }
409 
410 /**
411  * intel_reference_shared_dpll_crtc - Get a DPLL reference for a CRTC
412  * @crtc: CRTC on whose behalf the reference is taken
413  * @pll: DPLL for which the reference is taken
414  * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
415  *
416  * Take a reference for @pll tracking the use of it by @crtc.
417  */
418 static void
419 intel_reference_shared_dpll_crtc(const struct intel_crtc *crtc,
420 				 const struct intel_shared_dpll *pll,
421 				 struct intel_shared_dpll_state *shared_dpll_state)
422 {
423 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
424 
425 	drm_WARN_ON(&i915->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) != 0);
426 
427 	shared_dpll_state->pipe_mask |= BIT(crtc->pipe);
428 
429 	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] reserving %s\n",
430 		    crtc->base.base.id, crtc->base.name, pll->info->name);
431 }
432 
433 static void
434 intel_reference_shared_dpll(struct intel_atomic_state *state,
435 			    const struct intel_crtc *crtc,
436 			    const struct intel_shared_dpll *pll,
437 			    const struct intel_dpll_hw_state *dpll_hw_state)
438 {
439 	struct intel_shared_dpll_state *shared_dpll;
440 
441 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
442 
443 	if (shared_dpll[pll->index].pipe_mask == 0)
444 		shared_dpll[pll->index].hw_state = *dpll_hw_state;
445 
446 	intel_reference_shared_dpll_crtc(crtc, pll, &shared_dpll[pll->index]);
447 }
448 
449 /**
450  * intel_unreference_shared_dpll_crtc - Drop a DPLL reference for a CRTC
451  * @crtc: CRTC on whose behalf the reference is dropped
452  * @pll: DPLL for which the reference is dropped
453  * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
454  *
455  * Drop a reference for @pll tracking the end of use of it by @crtc.
456  */
457 void
458 intel_unreference_shared_dpll_crtc(const struct intel_crtc *crtc,
459 				   const struct intel_shared_dpll *pll,
460 				   struct intel_shared_dpll_state *shared_dpll_state)
461 {
462 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
463 
464 	drm_WARN_ON(&i915->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) == 0);
465 
466 	shared_dpll_state->pipe_mask &= ~BIT(crtc->pipe);
467 
468 	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] releasing %s\n",
469 		    crtc->base.base.id, crtc->base.name, pll->info->name);
470 }
471 
472 static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
473 					  const struct intel_crtc *crtc,
474 					  const struct intel_shared_dpll *pll)
475 {
476 	struct intel_shared_dpll_state *shared_dpll;
477 
478 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
479 
480 	intel_unreference_shared_dpll_crtc(crtc, pll, &shared_dpll[pll->index]);
481 }
482 
483 static void intel_put_dpll(struct intel_atomic_state *state,
484 			   struct intel_crtc *crtc)
485 {
486 	const struct intel_crtc_state *old_crtc_state =
487 		intel_atomic_get_old_crtc_state(state, crtc);
488 	struct intel_crtc_state *new_crtc_state =
489 		intel_atomic_get_new_crtc_state(state, crtc);
490 
491 	new_crtc_state->shared_dpll = NULL;
492 
493 	if (!old_crtc_state->shared_dpll)
494 		return;
495 
496 	intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
497 }
498 
499 /**
500  * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
501  * @state: atomic state
502  *
503  * This is the dpll version of drm_atomic_helper_swap_state() since the
504  * helper does not handle driver-specific global state.
505  *
506  * For consistency with atomic helpers this function does a complete swap,
507  * i.e. it also puts the current state into @state, even though there is no
508  * need for that at this moment.
509  */
510 void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
511 {
512 	struct drm_i915_private *i915 = to_i915(state->base.dev);
513 	struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
514 	struct intel_shared_dpll *pll;
515 	int i;
516 
517 	if (!state->dpll_set)
518 		return;
519 
520 	for_each_shared_dpll(i915, pll, i)
521 		swap(pll->state, shared_dpll[pll->index]);
522 }
523 
524 static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *i915,
525 				      struct intel_shared_dpll *pll,
526 				      struct intel_dpll_hw_state *dpll_hw_state)
527 {
528 	struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
529 	const enum intel_dpll_id id = pll->info->id;
530 	intel_wakeref_t wakeref;
531 	u32 val;
532 
533 	wakeref = intel_display_power_get_if_enabled(i915,
534 						     POWER_DOMAIN_DISPLAY_CORE);
535 	if (!wakeref)
536 		return false;
537 
538 	val = intel_de_read(i915, PCH_DPLL(id));
539 	hw_state->dpll = val;
540 	hw_state->fp0 = intel_de_read(i915, PCH_FP0(id));
541 	hw_state->fp1 = intel_de_read(i915, PCH_FP1(id));
542 
543 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
544 
545 	return val & DPLL_VCO_ENABLE;
546 }
547 
548 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *i915)
549 {
550 	struct intel_display *display = &i915->display;
551 	u32 val;
552 	bool enabled;
553 
554 	val = intel_de_read(display, PCH_DREF_CONTROL);
555 	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
556 			    DREF_SUPERSPREAD_SOURCE_MASK));
557 	INTEL_DISPLAY_STATE_WARN(display, !enabled,
558 				 "PCH refclk assertion failure, should be active but is disabled\n");
559 }
560 
561 static void ibx_pch_dpll_enable(struct drm_i915_private *i915,
562 				struct intel_shared_dpll *pll,
563 				const struct intel_dpll_hw_state *dpll_hw_state)
564 {
565 	const struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
566 	const enum intel_dpll_id id = pll->info->id;
567 
568 	/* PCH refclock must be enabled first */
569 	ibx_assert_pch_refclk_enabled(i915);
570 
571 	intel_de_write(i915, PCH_FP0(id), hw_state->fp0);
572 	intel_de_write(i915, PCH_FP1(id), hw_state->fp1);
573 
574 	intel_de_write(i915, PCH_DPLL(id), hw_state->dpll);
575 
576 	/* Wait for the clocks to stabilize. */
577 	intel_de_posting_read(i915, PCH_DPLL(id));
578 	udelay(150);
579 
580 	/* The pixel multiplier can only be updated once the
581 	 * DPLL is enabled and the clocks are stable.
582 	 *
583 	 * So write it again.
584 	 */
585 	intel_de_write(i915, PCH_DPLL(id), hw_state->dpll);
586 	intel_de_posting_read(i915, PCH_DPLL(id));
587 	udelay(200);
588 }
589 
590 static void ibx_pch_dpll_disable(struct drm_i915_private *i915,
591 				 struct intel_shared_dpll *pll)
592 {
593 	const enum intel_dpll_id id = pll->info->id;
594 
595 	intel_de_write(i915, PCH_DPLL(id), 0);
596 	intel_de_posting_read(i915, PCH_DPLL(id));
597 	udelay(200);
598 }
599 
600 static int ibx_compute_dpll(struct intel_atomic_state *state,
601 			    struct intel_crtc *crtc,
602 			    struct intel_encoder *encoder)
603 {
604 	return 0;
605 }
606 
607 static int ibx_get_dpll(struct intel_atomic_state *state,
608 			struct intel_crtc *crtc,
609 			struct intel_encoder *encoder)
610 {
611 	struct intel_crtc_state *crtc_state =
612 		intel_atomic_get_new_crtc_state(state, crtc);
613 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
614 	struct intel_shared_dpll *pll;
615 	enum intel_dpll_id id;
616 
617 	if (HAS_PCH_IBX(i915)) {
618 		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
619 		id = (enum intel_dpll_id) crtc->pipe;
620 		pll = intel_get_shared_dpll_by_id(i915, id);
621 
622 		drm_dbg_kms(&i915->drm,
623 			    "[CRTC:%d:%s] using pre-allocated %s\n",
624 			    crtc->base.base.id, crtc->base.name,
625 			    pll->info->name);
626 	} else {
627 		pll = intel_find_shared_dpll(state, crtc,
628 					     &crtc_state->dpll_hw_state,
629 					     BIT(DPLL_ID_PCH_PLL_B) |
630 					     BIT(DPLL_ID_PCH_PLL_A));
631 	}
632 
633 	if (!pll)
634 		return -EINVAL;
635 
636 	/* reference the pll */
637 	intel_reference_shared_dpll(state, crtc,
638 				    pll, &crtc_state->dpll_hw_state);
639 
640 	crtc_state->shared_dpll = pll;
641 
642 	return 0;
643 }
644 
645 static void ibx_dump_hw_state(struct drm_printer *p,
646 			      const struct intel_dpll_hw_state *dpll_hw_state)
647 {
648 	const struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
649 
650 	drm_printf(p, "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
651 		   "fp0: 0x%x, fp1: 0x%x\n",
652 		   hw_state->dpll,
653 		   hw_state->dpll_md,
654 		   hw_state->fp0,
655 		   hw_state->fp1);
656 }
657 
658 static bool ibx_compare_hw_state(const struct intel_dpll_hw_state *_a,
659 				 const struct intel_dpll_hw_state *_b)
660 {
661 	const struct i9xx_dpll_hw_state *a = &_a->i9xx;
662 	const struct i9xx_dpll_hw_state *b = &_b->i9xx;
663 
664 	return a->dpll == b->dpll &&
665 		a->dpll_md == b->dpll_md &&
666 		a->fp0 == b->fp0 &&
667 		a->fp1 == b->fp1;
668 }
669 
670 static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
671 	.enable = ibx_pch_dpll_enable,
672 	.disable = ibx_pch_dpll_disable,
673 	.get_hw_state = ibx_pch_dpll_get_hw_state,
674 };
675 
676 static const struct dpll_info pch_plls[] = {
677 	{ .name = "PCH DPLL A", .funcs = &ibx_pch_dpll_funcs, .id = DPLL_ID_PCH_PLL_A, },
678 	{ .name = "PCH DPLL B", .funcs = &ibx_pch_dpll_funcs, .id = DPLL_ID_PCH_PLL_B, },
679 	{}
680 };
681 
682 static const struct intel_dpll_mgr pch_pll_mgr = {
683 	.dpll_info = pch_plls,
684 	.compute_dplls = ibx_compute_dpll,
685 	.get_dplls = ibx_get_dpll,
686 	.put_dplls = intel_put_dpll,
687 	.dump_hw_state = ibx_dump_hw_state,
688 	.compare_hw_state = ibx_compare_hw_state,
689 };
690 
691 static void hsw_ddi_wrpll_enable(struct drm_i915_private *i915,
692 				 struct intel_shared_dpll *pll,
693 				 const struct intel_dpll_hw_state *dpll_hw_state)
694 {
695 	const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
696 	const enum intel_dpll_id id = pll->info->id;
697 
698 	intel_de_write(i915, WRPLL_CTL(id), hw_state->wrpll);
699 	intel_de_posting_read(i915, WRPLL_CTL(id));
700 	udelay(20);
701 }
702 
703 static void hsw_ddi_spll_enable(struct drm_i915_private *i915,
704 				struct intel_shared_dpll *pll,
705 				const struct intel_dpll_hw_state *dpll_hw_state)
706 {
707 	const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
708 
709 	intel_de_write(i915, SPLL_CTL, hw_state->spll);
710 	intel_de_posting_read(i915, SPLL_CTL);
711 	udelay(20);
712 }
713 
714 static void hsw_ddi_wrpll_disable(struct drm_i915_private *i915,
715 				  struct intel_shared_dpll *pll)
716 {
717 	const enum intel_dpll_id id = pll->info->id;
718 
719 	intel_de_rmw(i915, WRPLL_CTL(id), WRPLL_PLL_ENABLE, 0);
720 	intel_de_posting_read(i915, WRPLL_CTL(id));
721 
722 	/*
723 	 * Try to set up the PCH reference clock once all DPLLs
724 	 * that depend on it have been shut down.
725 	 */
726 	if (i915->display.dpll.pch_ssc_use & BIT(id))
727 		intel_init_pch_refclk(i915);
728 }
729 
730 static void hsw_ddi_spll_disable(struct drm_i915_private *i915,
731 				 struct intel_shared_dpll *pll)
732 {
733 	enum intel_dpll_id id = pll->info->id;
734 
735 	intel_de_rmw(i915, SPLL_CTL, SPLL_PLL_ENABLE, 0);
736 	intel_de_posting_read(i915, SPLL_CTL);
737 
738 	/*
739 	 * Try to set up the PCH reference clock once all DPLLs
740 	 * that depend on it have been shut down.
741 	 */
742 	if (i915->display.dpll.pch_ssc_use & BIT(id))
743 		intel_init_pch_refclk(i915);
744 }
745 
746 static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *i915,
747 				       struct intel_shared_dpll *pll,
748 				       struct intel_dpll_hw_state *dpll_hw_state)
749 {
750 	struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
751 	const enum intel_dpll_id id = pll->info->id;
752 	intel_wakeref_t wakeref;
753 	u32 val;
754 
755 	wakeref = intel_display_power_get_if_enabled(i915,
756 						     POWER_DOMAIN_DISPLAY_CORE);
757 	if (!wakeref)
758 		return false;
759 
760 	val = intel_de_read(i915, WRPLL_CTL(id));
761 	hw_state->wrpll = val;
762 
763 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
764 
765 	return val & WRPLL_PLL_ENABLE;
766 }
767 
768 static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *i915,
769 				      struct intel_shared_dpll *pll,
770 				      struct intel_dpll_hw_state *dpll_hw_state)
771 {
772 	struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
773 	intel_wakeref_t wakeref;
774 	u32 val;
775 
776 	wakeref = intel_display_power_get_if_enabled(i915,
777 						     POWER_DOMAIN_DISPLAY_CORE);
778 	if (!wakeref)
779 		return false;
780 
781 	val = intel_de_read(i915, SPLL_CTL);
782 	hw_state->spll = val;
783 
784 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
785 
786 	return val & SPLL_PLL_ENABLE;
787 }
788 
789 #define LC_FREQ 2700
790 #define LC_FREQ_2K U64_C(LC_FREQ * 2000)
791 
792 #define P_MIN 2
793 #define P_MAX 64
794 #define P_INC 2
795 
796 /* Constraints for PLL good behavior */
797 #define REF_MIN 48
798 #define REF_MAX 400
799 #define VCO_MIN 2400
800 #define VCO_MAX 4800
801 
802 struct hsw_wrpll_rnp {
803 	unsigned p, n2, r2;
804 };
805 
806 static unsigned hsw_wrpll_get_budget_for_freq(int clock)
807 {
808 	switch (clock) {
809 	case 25175000:
810 	case 25200000:
811 	case 27000000:
812 	case 27027000:
813 	case 37762500:
814 	case 37800000:
815 	case 40500000:
816 	case 40541000:
817 	case 54000000:
818 	case 54054000:
819 	case 59341000:
820 	case 59400000:
821 	case 72000000:
822 	case 74176000:
823 	case 74250000:
824 	case 81000000:
825 	case 81081000:
826 	case 89012000:
827 	case 89100000:
828 	case 108000000:
829 	case 108108000:
830 	case 111264000:
831 	case 111375000:
832 	case 148352000:
833 	case 148500000:
834 	case 162000000:
835 	case 162162000:
836 	case 222525000:
837 	case 222750000:
838 	case 296703000:
839 	case 297000000:
840 		return 0;
841 	case 233500000:
842 	case 245250000:
843 	case 247750000:
844 	case 253250000:
845 	case 298000000:
846 		return 1500;
847 	case 169128000:
848 	case 169500000:
849 	case 179500000:
850 	case 202000000:
851 		return 2000;
852 	case 256250000:
853 	case 262500000:
854 	case 270000000:
855 	case 272500000:
856 	case 273750000:
857 	case 280750000:
858 	case 281250000:
859 	case 286000000:
860 	case 291750000:
861 		return 4000;
862 	case 267250000:
863 	case 268500000:
864 		return 5000;
865 	default:
866 		return 1000;
867 	}
868 }
869 
870 static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
871 				 unsigned int r2, unsigned int n2,
872 				 unsigned int p,
873 				 struct hsw_wrpll_rnp *best)
874 {
875 	u64 a, b, c, d, diff, diff_best;
876 
877 	/* No best (r,n,p) yet */
878 	if (best->p == 0) {
879 		best->p = p;
880 		best->n2 = n2;
881 		best->r2 = r2;
882 		return;
883 	}
884 
885 	/*
886 	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
887 	 * freq2k.
888 	 *
889 	 * delta = 1e6 *
890 	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
891 	 *	   freq2k;
892 	 *
893 	 * and we would like delta <= budget.
	 *
	 * Rearranged to avoid the division, the code below compares
	 * budget * freq2k * p * r2 (a, b) against
	 * 1e6 * |freq2k * p * r2 - LC_FREQ_2K * n2| (c, d), so that
	 * delta <= budget is equivalent to a >= c (b >= d for the
	 * current best).
894 	 *
895 	 * If the discrepancy is above the PPM-based budget, always prefer to
896 	 * improve upon the previous solution.  However, if you're within the
897 	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
898 	 */
899 	a = freq2k * budget * p * r2;
900 	b = freq2k * budget * best->p * best->r2;
901 	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
902 	diff_best = abs_diff(freq2k * best->p * best->r2,
903 			     LC_FREQ_2K * best->n2);
904 	c = 1000000 * diff;
905 	d = 1000000 * diff_best;
906 
907 	if (a < c && b < d) {
908 		/* If both are above the budget, pick the closer */
909 		if (best->p * best->r2 * diff < p * r2 * diff_best) {
910 			best->p = p;
911 			best->n2 = n2;
912 			best->r2 = r2;
913 		}
914 	} else if (a >= c && b < d) {
915 		/* The candidate is within the budget but the current best is not: take it. */
916 		best->p = p;
917 		best->n2 = n2;
918 		best->r2 = r2;
919 	} else if (a >= c && b >= d) {
920 		/* Both are below the limit, so pick the higher n2/(r2*r2) */
921 		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
922 			best->p = p;
923 			best->n2 = n2;
924 			best->r2 = r2;
925 		}
926 	}
927 	/* Otherwise a < c && b >= d, do nothing */
928 }
929 
930 static void
931 hsw_ddi_calculate_wrpll(int clock /* in Hz */,
932 			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
933 {
934 	u64 freq2k;
935 	unsigned p, n2, r2;
936 	struct hsw_wrpll_rnp best = {};
937 	unsigned budget;
938 
939 	freq2k = clock / 100;
940 
941 	budget = hsw_wrpll_get_budget_for_freq(clock);
942 
943 	/* Special case handling for the 540 MHz pixel clock: bypass the WR PLL entirely
944 	 * and directly pass the LC PLL to it. */
945 	if (freq2k == 5400000) {
946 		*n2_out = 2;
947 		*p_out = 1;
948 		*r2_out = 2;
949 		return;
950 	}
951 
952 	/*
953 	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
954 	 * the WR PLL.
955 	 *
956 	 * We want R so that REF_MIN <= Ref <= REF_MAX.
957 	 * Injecting R2 = 2 * R gives:
958 	 *   REF_MAX * r2 > LC_FREQ * 2 and
959 	 *   REF_MIN * r2 < LC_FREQ * 2
960 	 *
961 	 * Which means the desired boundaries for r2 are:
962 	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
963 	 *
964 	 */
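	/*
	 * Worked example with the constants above (purely illustrative):
	 * LC_FREQ * 2 = 5400, so the loop below sweeps r2 from
	 * 5400 / 400 + 1 = 14 up to 5400 / 48 = 112.
	 */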
965 	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
966 	     r2 <= LC_FREQ * 2 / REF_MIN;
967 	     r2++) {
968 
969 		/*
970 		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
971 		 *
972 		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
973 		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
974 		 *   VCO_MAX * r2 > n2 * LC_FREQ and
975 		 *   VCO_MIN * r2 < n2 * LC_FREQ)
976 		 *
977 		 * Which means the desired boundaries for n2 are:
978 		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
979 		 */
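		/*
		 * Continuing the example above (illustrative): for r2 = 14,
		 * n2 sweeps from 2400 * 14 / 2700 + 1 = 13 up to
		 * 4800 * 14 / 2700 = 24.
		 */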
980 		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
981 		     n2 <= VCO_MAX * r2 / LC_FREQ;
982 		     n2++) {
983 
984 			for (p = P_MIN; p <= P_MAX; p += P_INC)
985 				hsw_wrpll_update_rnp(freq2k, budget,
986 						     r2, n2, p, &best);
987 		}
988 	}
989 
990 	*n2_out = best.n2;
991 	*p_out = best.p;
992 	*r2_out = best.r2;
993 }
994 
995 static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *i915,
996 				  const struct intel_shared_dpll *pll,
997 				  const struct intel_dpll_hw_state *dpll_hw_state)
998 {
999 	const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
1000 	int refclk;
1001 	int n, p, r;
1002 	u32 wrpll = hw_state->wrpll;
1003 
1004 	switch (wrpll & WRPLL_REF_MASK) {
1005 	case WRPLL_REF_SPECIAL_HSW:
1006 		/* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
1007 		if (IS_HASWELL(i915) && !IS_HASWELL_ULT(i915)) {
1008 			refclk = i915->display.dpll.ref_clks.nssc;
1009 			break;
1010 		}
1011 		fallthrough;
1012 	case WRPLL_REF_PCH_SSC:
1013 		/*
1014 		 * We could calculate spread here, but our checking
1015 		 * code only cares about 5% accuracy, and spread is a max of
1016 		 * 0.5% downspread.
1017 		 */
1018 		refclk = i915->display.dpll.ref_clks.ssc;
1019 		break;
1020 	case WRPLL_REF_LCPLL:
1021 		refclk = 2700000;
1022 		break;
1023 	default:
1024 		MISSING_CASE(wrpll);
1025 		return 0;
1026 	}
1027 
1028 	r = wrpll & WRPLL_DIVIDER_REF_MASK;
1029 	p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
1030 	n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;
1031 
1032 	/* Convert to kHz; p and r have a fixed-point portion */
1033 	return (refclk * n / 10) / (p * r) * 2;
1034 }
1035 
1036 static int
1037 hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state,
1038 			   struct intel_crtc *crtc)
1039 {
1040 	struct drm_i915_private *i915 = to_i915(state->base.dev);
1041 	struct intel_crtc_state *crtc_state =
1042 		intel_atomic_get_new_crtc_state(state, crtc);
1043 	struct hsw_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.hsw;
1044 	unsigned int p, n2, r2;
1045 
1046 	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
1047 
1048 	hw_state->wrpll =
1049 		WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
1050 		WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
1051 		WRPLL_DIVIDER_POST(p);
1052 
1053 	crtc_state->port_clock = hsw_ddi_wrpll_get_freq(i915, NULL,
1054 							&crtc_state->dpll_hw_state);
1055 
1056 	return 0;
1057 }
1058 
1059 static struct intel_shared_dpll *
1060 hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
1061 		       struct intel_crtc *crtc)
1062 {
1063 	struct intel_crtc_state *crtc_state =
1064 		intel_atomic_get_new_crtc_state(state, crtc);
1065 
1066 	return intel_find_shared_dpll(state, crtc,
1067 				      &crtc_state->dpll_hw_state,
1068 				      BIT(DPLL_ID_WRPLL2) |
1069 				      BIT(DPLL_ID_WRPLL1));
1070 }
1071 
1072 static int
1073 hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state)
1074 {
1075 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1076 	int clock = crtc_state->port_clock;
1077 
1078 	switch (clock / 2) {
1079 	case 81000:
1080 	case 135000:
1081 	case 270000:
1082 		return 0;
1083 	default:
1084 		drm_dbg_kms(&i915->drm, "Invalid clock for DP: %d\n",
1085 			    clock);
1086 		return -EINVAL;
1087 	}
1088 }
1089 
1090 static struct intel_shared_dpll *
1091 hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
1092 {
1093 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1094 	struct intel_shared_dpll *pll;
1095 	enum intel_dpll_id pll_id;
1096 	int clock = crtc_state->port_clock;
1097 
1098 	switch (clock / 2) {
1099 	case 81000:
1100 		pll_id = DPLL_ID_LCPLL_810;
1101 		break;
1102 	case 135000:
1103 		pll_id = DPLL_ID_LCPLL_1350;
1104 		break;
1105 	case 270000:
1106 		pll_id = DPLL_ID_LCPLL_2700;
1107 		break;
1108 	default:
1109 		MISSING_CASE(clock / 2);
1110 		return NULL;
1111 	}
1112 
1113 	pll = intel_get_shared_dpll_by_id(i915, pll_id);
1114 
1115 	if (!pll)
1116 		return NULL;
1117 
1118 	return pll;
1119 }
1120 
1121 static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1122 				  const struct intel_shared_dpll *pll,
1123 				  const struct intel_dpll_hw_state *dpll_hw_state)
1124 {
1125 	int link_clock = 0;
1126 
1127 	switch (pll->info->id) {
1128 	case DPLL_ID_LCPLL_810:
1129 		link_clock = 81000;
1130 		break;
1131 	case DPLL_ID_LCPLL_1350:
1132 		link_clock = 135000;
1133 		break;
1134 	case DPLL_ID_LCPLL_2700:
1135 		link_clock = 270000;
1136 		break;
1137 	default:
1138 		drm_WARN(&i915->drm, 1, "bad port clock sel\n");
1139 		break;
1140 	}
1141 
1142 	return link_clock * 2;
1143 }
1144 
1145 static int
1146 hsw_ddi_spll_compute_dpll(struct intel_atomic_state *state,
1147 			  struct intel_crtc *crtc)
1148 {
1149 	struct intel_crtc_state *crtc_state =
1150 		intel_atomic_get_new_crtc_state(state, crtc);
1151 	struct hsw_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.hsw;
1152 
1153 	if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
1154 		return -EINVAL;
1155 
1156 	hw_state->spll =
1157 		SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
1158 
1159 	return 0;
1160 }
1161 
1162 static struct intel_shared_dpll *
1163 hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
1164 		      struct intel_crtc *crtc)
1165 {
1166 	struct intel_crtc_state *crtc_state =
1167 		intel_atomic_get_new_crtc_state(state, crtc);
1168 
1169 	return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
1170 				      BIT(DPLL_ID_SPLL));
1171 }
1172 
1173 static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
1174 				 const struct intel_shared_dpll *pll,
1175 				 const struct intel_dpll_hw_state *dpll_hw_state)
1176 {
1177 	const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
1178 	int link_clock = 0;
1179 
1180 	switch (hw_state->spll & SPLL_FREQ_MASK) {
1181 	case SPLL_FREQ_810MHz:
1182 		link_clock = 81000;
1183 		break;
1184 	case SPLL_FREQ_1350MHz:
1185 		link_clock = 135000;
1186 		break;
1187 	case SPLL_FREQ_2700MHz:
1188 		link_clock = 270000;
1189 		break;
1190 	default:
1191 		drm_WARN(&i915->drm, 1, "bad spll freq\n");
1192 		break;
1193 	}
1194 
1195 	return link_clock * 2;
1196 }
1197 
1198 static int hsw_compute_dpll(struct intel_atomic_state *state,
1199 			    struct intel_crtc *crtc,
1200 			    struct intel_encoder *encoder)
1201 {
1202 	struct intel_crtc_state *crtc_state =
1203 		intel_atomic_get_new_crtc_state(state, crtc);
1204 
1205 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1206 		return hsw_ddi_wrpll_compute_dpll(state, crtc);
1207 	else if (intel_crtc_has_dp_encoder(crtc_state))
1208 		return hsw_ddi_lcpll_compute_dpll(crtc_state);
1209 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1210 		return hsw_ddi_spll_compute_dpll(state, crtc);
1211 	else
1212 		return -EINVAL;
1213 }
1214 
1215 static int hsw_get_dpll(struct intel_atomic_state *state,
1216 			struct intel_crtc *crtc,
1217 			struct intel_encoder *encoder)
1218 {
1219 	struct intel_crtc_state *crtc_state =
1220 		intel_atomic_get_new_crtc_state(state, crtc);
1221 	struct intel_shared_dpll *pll = NULL;
1222 
1223 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1224 		pll = hsw_ddi_wrpll_get_dpll(state, crtc);
1225 	else if (intel_crtc_has_dp_encoder(crtc_state))
1226 		pll = hsw_ddi_lcpll_get_dpll(crtc_state);
1227 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1228 		pll = hsw_ddi_spll_get_dpll(state, crtc);
1229 
1230 	if (!pll)
1231 		return -EINVAL;
1232 
1233 	intel_reference_shared_dpll(state, crtc,
1234 				    pll, &crtc_state->dpll_hw_state);
1235 
1236 	crtc_state->shared_dpll = pll;
1237 
1238 	return 0;
1239 }
1240 
1241 static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
1242 {
1243 	i915->display.dpll.ref_clks.ssc = 135000;
1244 	/* Non-SSC is only used on non-ULT HSW. */
1245 	if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
1246 		i915->display.dpll.ref_clks.nssc = 24000;
1247 	else
1248 		i915->display.dpll.ref_clks.nssc = 135000;
1249 }
1250 
1251 static void hsw_dump_hw_state(struct drm_printer *p,
1252 			      const struct intel_dpll_hw_state *dpll_hw_state)
1253 {
1254 	const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
1255 
1256 	drm_printf(p, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
1257 		   hw_state->wrpll, hw_state->spll);
1258 }
1259 
1260 static bool hsw_compare_hw_state(const struct intel_dpll_hw_state *_a,
1261 				 const struct intel_dpll_hw_state *_b)
1262 {
1263 	const struct hsw_dpll_hw_state *a = &_a->hsw;
1264 	const struct hsw_dpll_hw_state *b = &_b->hsw;
1265 
1266 	return a->wrpll == b->wrpll &&
1267 		a->spll == b->spll;
1268 }
1269 
1270 static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
1271 	.enable = hsw_ddi_wrpll_enable,
1272 	.disable = hsw_ddi_wrpll_disable,
1273 	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
1274 	.get_freq = hsw_ddi_wrpll_get_freq,
1275 };
1276 
1277 static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
1278 	.enable = hsw_ddi_spll_enable,
1279 	.disable = hsw_ddi_spll_disable,
1280 	.get_hw_state = hsw_ddi_spll_get_hw_state,
1281 	.get_freq = hsw_ddi_spll_get_freq,
1282 };
1283 
1284 static void hsw_ddi_lcpll_enable(struct drm_i915_private *i915,
1285 				 struct intel_shared_dpll *pll,
1286 				 const struct intel_dpll_hw_state *hw_state)
1287 {
1288 }
1289 
1290 static void hsw_ddi_lcpll_disable(struct drm_i915_private *i915,
1291 				  struct intel_shared_dpll *pll)
1292 {
1293 }
1294 
1295 static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *i915,
1296 				       struct intel_shared_dpll *pll,
1297 				       struct intel_dpll_hw_state *dpll_hw_state)
1298 {
1299 	return true;
1300 }
1301 
1302 static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
1303 	.enable = hsw_ddi_lcpll_enable,
1304 	.disable = hsw_ddi_lcpll_disable,
1305 	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
1306 	.get_freq = hsw_ddi_lcpll_get_freq,
1307 };
1308 
1309 static const struct dpll_info hsw_plls[] = {
1310 	{ .name = "WRPLL 1", .funcs = &hsw_ddi_wrpll_funcs, .id = DPLL_ID_WRPLL1, },
1311 	{ .name = "WRPLL 2", .funcs = &hsw_ddi_wrpll_funcs, .id = DPLL_ID_WRPLL2, },
1312 	{ .name = "SPLL", .funcs = &hsw_ddi_spll_funcs, .id = DPLL_ID_SPLL, },
1313 	{ .name = "LCPLL 810", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_810,
1314 	  .always_on = true, },
1315 	{ .name = "LCPLL 1350", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_1350,
1316 	  .always_on = true, },
1317 	{ .name = "LCPLL 2700", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_2700,
1318 	  .always_on = true, },
1319 	{}
1320 };
1321 
1322 static const struct intel_dpll_mgr hsw_pll_mgr = {
1323 	.dpll_info = hsw_plls,
1324 	.compute_dplls = hsw_compute_dpll,
1325 	.get_dplls = hsw_get_dpll,
1326 	.put_dplls = intel_put_dpll,
1327 	.update_ref_clks = hsw_update_dpll_ref_clks,
1328 	.dump_hw_state = hsw_dump_hw_state,
1329 	.compare_hw_state = hsw_compare_hw_state,
1330 };
1331 
1332 struct skl_dpll_regs {
1333 	i915_reg_t ctl, cfgcr1, cfgcr2;
1334 };
1335 
1336 /* this array is indexed by the *shared* pll id */
1337 static const struct skl_dpll_regs skl_dpll_regs[4] = {
1338 	{
1339 		/* DPLL 0 */
1340 		.ctl = LCPLL1_CTL,
1341 		/* DPLL 0 doesn't support HDMI mode */
1342 	},
1343 	{
1344 		/* DPLL 1 */
1345 		.ctl = LCPLL2_CTL,
1346 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
1347 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
1348 	},
1349 	{
1350 		/* DPLL 2 */
1351 		.ctl = WRPLL_CTL(0),
1352 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
1353 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
1354 	},
1355 	{
1356 		/* DPLL 3 */
1357 		.ctl = WRPLL_CTL(1),
1358 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
1359 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
1360 	},
1361 };
1362 
1363 static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *i915,
1364 				    struct intel_shared_dpll *pll,
1365 				    const struct skl_dpll_hw_state *hw_state)
1366 {
1367 	const enum intel_dpll_id id = pll->info->id;
1368 
1369 	intel_de_rmw(i915, DPLL_CTRL1,
1370 		     DPLL_CTRL1_HDMI_MODE(id) |
1371 		     DPLL_CTRL1_SSC(id) |
1372 		     DPLL_CTRL1_LINK_RATE_MASK(id),
1373 		     hw_state->ctrl1 << (id * 6));
1374 	intel_de_posting_read(i915, DPLL_CTRL1);
1375 }
1376 
1377 static void skl_ddi_pll_enable(struct drm_i915_private *i915,
1378 			       struct intel_shared_dpll *pll,
1379 			       const struct intel_dpll_hw_state *dpll_hw_state)
1380 {
1381 	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1382 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1383 	const enum intel_dpll_id id = pll->info->id;
1384 
1385 	skl_ddi_pll_write_ctrl1(i915, pll, hw_state);
1386 
1387 	intel_de_write(i915, regs[id].cfgcr1, hw_state->cfgcr1);
1388 	intel_de_write(i915, regs[id].cfgcr2, hw_state->cfgcr2);
1389 	intel_de_posting_read(i915, regs[id].cfgcr1);
1390 	intel_de_posting_read(i915, regs[id].cfgcr2);
1391 
1392 	/* the enable bit is always bit 31 */
1393 	intel_de_rmw(i915, regs[id].ctl, 0, LCPLL_PLL_ENABLE);
1394 
1395 	if (intel_de_wait_for_set(i915, DPLL_STATUS, DPLL_LOCK(id), 5))
1396 		drm_err(&i915->drm, "DPLL %d not locked\n", id);
1397 }
1398 
1399 static void skl_ddi_dpll0_enable(struct drm_i915_private *i915,
1400 				 struct intel_shared_dpll *pll,
1401 				 const struct intel_dpll_hw_state *dpll_hw_state)
1402 {
1403 	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1404 
1405 	skl_ddi_pll_write_ctrl1(i915, pll, hw_state);
1406 }
1407 
1408 static void skl_ddi_pll_disable(struct drm_i915_private *i915,
1409 				struct intel_shared_dpll *pll)
1410 {
1411 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1412 	const enum intel_dpll_id id = pll->info->id;
1413 
1414 	/* the enable bit is always bit 31 */
1415 	intel_de_rmw(i915, regs[id].ctl, LCPLL_PLL_ENABLE, 0);
1416 	intel_de_posting_read(i915, regs[id].ctl);
1417 }
1418 
1419 static void skl_ddi_dpll0_disable(struct drm_i915_private *i915,
1420 				  struct intel_shared_dpll *pll)
1421 {
1422 }
1423 
1424 static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *i915,
1425 				     struct intel_shared_dpll *pll,
1426 				     struct intel_dpll_hw_state *dpll_hw_state)
1427 {
1428 	struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1429 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1430 	const enum intel_dpll_id id = pll->info->id;
1431 	intel_wakeref_t wakeref;
1432 	bool ret;
1433 	u32 val;
1434 
1435 	wakeref = intel_display_power_get_if_enabled(i915,
1436 						     POWER_DOMAIN_DISPLAY_CORE);
1437 	if (!wakeref)
1438 		return false;
1439 
1440 	ret = false;
1441 
1442 	val = intel_de_read(i915, regs[id].ctl);
1443 	if (!(val & LCPLL_PLL_ENABLE))
1444 		goto out;
1445 
1446 	val = intel_de_read(i915, DPLL_CTRL1);
1447 	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1448 
1449 	/* avoid reading back stale values if HDMI mode is not enabled */
1450 	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
1451 		hw_state->cfgcr1 = intel_de_read(i915, regs[id].cfgcr1);
1452 		hw_state->cfgcr2 = intel_de_read(i915, regs[id].cfgcr2);
1453 	}
1454 	ret = true;
1455 
1456 out:
1457 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1458 
1459 	return ret;
1460 }
1461 
1462 static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *i915,
1463 				       struct intel_shared_dpll *pll,
1464 				       struct intel_dpll_hw_state *dpll_hw_state)
1465 {
1466 	struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1467 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1468 	const enum intel_dpll_id id = pll->info->id;
1469 	intel_wakeref_t wakeref;
1470 	u32 val;
1471 	bool ret;
1472 
1473 	wakeref = intel_display_power_get_if_enabled(i915,
1474 						     POWER_DOMAIN_DISPLAY_CORE);
1475 	if (!wakeref)
1476 		return false;
1477 
1478 	ret = false;
1479 
1480 	/* DPLL0 is always enabled since it drives CDCLK */
1481 	val = intel_de_read(i915, regs[id].ctl);
1482 	if (drm_WARN_ON(&i915->drm, !(val & LCPLL_PLL_ENABLE)))
1483 		goto out;
1484 
1485 	val = intel_de_read(i915, DPLL_CTRL1);
1486 	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1487 
1488 	ret = true;
1489 
1490 out:
1491 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1492 
1493 	return ret;
1494 }
1495 
1496 struct skl_wrpll_context {
1497 	u64 min_deviation;		/* current minimal deviation */
1498 	u64 central_freq;		/* chosen central freq */
1499 	u64 dco_freq;			/* chosen dco freq */
1500 	unsigned int p;			/* chosen divider */
1501 };
1502 
1503 /* DCO freq must be within +1%/-6% of the DCO central freq (the deviations below are in units of 0.01%) */
1504 #define SKL_DCO_MAX_PDEVIATION	100
1505 #define SKL_DCO_MAX_NDEVIATION	600
1506 
1507 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1508 				  u64 central_freq,
1509 				  u64 dco_freq,
1510 				  unsigned int divider)
1511 {
1512 	u64 deviation;
1513 
1514 	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1515 			      central_freq);
1516 
1517 	/* positive deviation */
1518 	if (dco_freq >= central_freq) {
1519 		if (deviation < SKL_DCO_MAX_PDEVIATION &&
1520 		    deviation < ctx->min_deviation) {
1521 			ctx->min_deviation = deviation;
1522 			ctx->central_freq = central_freq;
1523 			ctx->dco_freq = dco_freq;
1524 			ctx->p = divider;
1525 		}
1526 	/* negative deviation */
1527 	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1528 		   deviation < ctx->min_deviation) {
1529 		ctx->min_deviation = deviation;
1530 		ctx->central_freq = central_freq;
1531 		ctx->dco_freq = dco_freq;
1532 		ctx->p = divider;
1533 	}
1534 }
1535 
1536 static void skl_wrpll_get_multipliers(unsigned int p,
1537 				      unsigned int *p0 /* out */,
1538 				      unsigned int *p1 /* out */,
1539 				      unsigned int *p2 /* out */)
1540 {
1541 	/* even dividers */
1542 	if (p % 2 == 0) {
1543 		unsigned int half = p / 2;
1544 
1545 		if (half == 1 || half == 2 || half == 3 || half == 5) {
1546 			*p0 = 2;
1547 			*p1 = 1;
1548 			*p2 = half;
1549 		} else if (half % 2 == 0) {
1550 			*p0 = 2;
1551 			*p1 = half / 2;
1552 			*p2 = 2;
1553 		} else if (half % 3 == 0) {
1554 			*p0 = 3;
1555 			*p1 = half / 3;
1556 			*p2 = 2;
1557 		} else if (half % 7 == 0) {
1558 			*p0 = 7;
1559 			*p1 = half / 7;
1560 			*p2 = 2;
1561 		}
1562 	} else if (p == 3 || p == 9) {  /* 3, 5, 7, 9, 15, 21, 35 */
1563 		*p0 = 3;
1564 		*p1 = 1;
1565 		*p2 = p / 3;
1566 	} else if (p == 5 || p == 7) {
1567 		*p0 = p;
1568 		*p1 = 1;
1569 		*p2 = 1;
1570 	} else if (p == 15) {
1571 		*p0 = 3;
1572 		*p1 = 1;
1573 		*p2 = 5;
1574 	} else if (p == 21) {
1575 		*p0 = 7;
1576 		*p1 = 1;
1577 		*p2 = 3;
1578 	} else if (p == 35) {
1579 		*p0 = 7;
1580 		*p1 = 1;
1581 		*p2 = 5;
1582 	}
1583 }
1584 
1585 struct skl_wrpll_params {
1586 	u32 dco_fraction;
1587 	u32 dco_integer;
1588 	u32 qdiv_ratio;
1589 	u32 qdiv_mode;
1590 	u32 kdiv;
1591 	u32 pdiv;
1592 	u32 central_freq;
1593 };
1594 
1595 static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
1596 				      u64 afe_clock,
1597 				      int ref_clock,
1598 				      u64 central_freq,
1599 				      u32 p0, u32 p1, u32 p2)
1600 {
1601 	u64 dco_freq;
1602 
1603 	switch (central_freq) {
1604 	case 9600000000ULL:
1605 		params->central_freq = 0;
1606 		break;
1607 	case 9000000000ULL:
1608 		params->central_freq = 1;
1609 		break;
1610 	case 8400000000ULL:
1611 		params->central_freq = 3;
1612 	}
1613 
1614 	switch (p0) {
1615 	case 1:
1616 		params->pdiv = 0;
1617 		break;
1618 	case 2:
1619 		params->pdiv = 1;
1620 		break;
1621 	case 3:
1622 		params->pdiv = 2;
1623 		break;
1624 	case 7:
1625 		params->pdiv = 4;
1626 		break;
1627 	default:
1628 		WARN(1, "Incorrect PDiv\n");
1629 	}
1630 
1631 	switch (p2) {
1632 	case 5:
1633 		params->kdiv = 0;
1634 		break;
1635 	case 2:
1636 		params->kdiv = 1;
1637 		break;
1638 	case 3:
1639 		params->kdiv = 2;
1640 		break;
1641 	case 1:
1642 		params->kdiv = 3;
1643 		break;
1644 	default:
1645 		WARN(1, "Incorrect KDiv\n");
1646 	}
1647 
1648 	params->qdiv_ratio = p1;
1649 	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
1650 
1651 	dco_freq = p0 * p1 * p2 * afe_clock;
1652 
1653 	/*
1654 	 * Intermediate values are in Hz.
1655 	 * Divide by MHz to match bspec
1656 	 */
1657 	params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
1658 	params->dco_fraction =
1659 		div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
1660 			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
1661 }
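/*
 * Worked example (illustrative): with a 24 MHz reference and a 148.5 MHz
 * pixel clock using p0/p1/p2 = 2/3/2, afe_clock = 742,500,000 Hz and
 * dco_freq = 12 * afe_clock = 8,910,000,000 Hz, giving
 * dco_integer = 8.91e9 / 24e6 = 371 (0x173) and
 * dco_fraction = (371,250,000 - 371,000,000) * 0x8000 / 1,000,000 = 0x2000.
 */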
1662 
1663 static int
1664 skl_ddi_calculate_wrpll(int clock,
1665 			int ref_clock,
1666 			struct skl_wrpll_params *wrpll_params)
1667 {
1668 	static const u64 dco_central_freq[3] = { 8400000000ULL,
1669 						 9000000000ULL,
1670 						 9600000000ULL };
1671 	static const u8 even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
1672 					    24, 28, 30, 32, 36, 40, 42, 44,
1673 					    48, 52, 54, 56, 60, 64, 66, 68,
1674 					    70, 72, 76, 78, 80, 84, 88, 90,
1675 					    92, 96, 98 };
1676 	static const u8 odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
1677 	static const struct {
1678 		const u8 *list;
1679 		int n_dividers;
1680 	} dividers[] = {
1681 		{ even_dividers, ARRAY_SIZE(even_dividers) },
1682 		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
1683 	};
1684 	struct skl_wrpll_context ctx = {
1685 		.min_deviation = U64_MAX,
1686 	};
1687 	unsigned int dco, d, i;
1688 	unsigned int p0, p1, p2;
1689 	u64 afe_clock = (u64)clock * 1000 * 5; /* AFE Clock is 5x Pixel clock, in Hz */
1690 
1691 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
1692 		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
1693 			for (i = 0; i < dividers[d].n_dividers; i++) {
1694 				unsigned int p = dividers[d].list[i];
1695 				u64 dco_freq = p * afe_clock;
1696 
1697 				skl_wrpll_try_divider(&ctx,
1698 						      dco_central_freq[dco],
1699 						      dco_freq,
1700 						      p);
1701 				/*
1702 				 * Skip the remaining dividers if we're sure to
1703 				 * Skip the remaining dividers if we're sure to
1704 				 * have found the definitive divider; a 0
1705 				 * deviation can't be improved upon.
1706 				if (ctx.min_deviation == 0)
1707 					goto skip_remaining_dividers;
1708 			}
1709 		}
1710 
1711 skip_remaining_dividers:
1712 		/*
1713 		 * If a solution is found with an even divider, prefer
1714 		 * this one.
1715 		 */
1716 		if (d == 0 && ctx.p)
1717 			break;
1718 	}
1719 
1720 	if (!ctx.p)
1721 		return -EINVAL;
1722 
1723 	/*
1724 	 * gcc incorrectly analyses that these can be used without being
1725 	 * initialized. To be fair, it's hard to guess.
1726 	 */
1727 	p0 = p1 = p2 = 0;
1728 	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
1729 	skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
1730 				  ctx.central_freq, p0, p1, p2);
1731 
1732 	return 0;
1733 }
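/*
 * Worked example (illustrative): clock = 148,500 kHz gives
 * afe_clock = 742.5 MHz; the only even divider landing inside the allowed
 * DCO window is p = 12 against the 9.0 GHz central frequency
 * (dco = 8.91 GHz, i.e. 1% below central, deviation = 100), so the odd
 * dividers are never tried for this clock.
 */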
1734 
1735 static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
1736 				  const struct intel_shared_dpll *pll,
1737 				  const struct intel_dpll_hw_state *dpll_hw_state)
1738 {
1739 	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1740 	int ref_clock = i915->display.dpll.ref_clks.nssc;
1741 	u32 p0, p1, p2, dco_freq;
1742 
1743 	p0 = hw_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
1744 	p2 = hw_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;
1745 
1746 	if (hw_state->cfgcr2 & DPLL_CFGCR2_QDIV_MODE(1))
1747 		p1 = (hw_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
1748 	else
1749 		p1 = 1;
1750 
1751 
1752 	switch (p0) {
1753 	case DPLL_CFGCR2_PDIV_1:
1754 		p0 = 1;
1755 		break;
1756 	case DPLL_CFGCR2_PDIV_2:
1757 		p0 = 2;
1758 		break;
1759 	case DPLL_CFGCR2_PDIV_3:
1760 		p0 = 3;
1761 		break;
1762 	case DPLL_CFGCR2_PDIV_7_INVALID:
1763 		/*
1764 		 * Incorrect ASUS-Z170M BIOS setting; the HW seems to ignore bit#0,
1765 		 * so handle it the same way as PDIV_7.
1766 		 */
1767 		drm_dbg_kms(&i915->drm, "Invalid WRPLL PDIV divider value, fixing it.\n");
1768 		fallthrough;
1769 	case DPLL_CFGCR2_PDIV_7:
1770 		p0 = 7;
1771 		break;
1772 	default:
1773 		MISSING_CASE(p0);
1774 		return 0;
1775 	}
1776 
1777 	switch (p2) {
1778 	case DPLL_CFGCR2_KDIV_5:
1779 		p2 = 5;
1780 		break;
1781 	case DPLL_CFGCR2_KDIV_2:
1782 		p2 = 2;
1783 		break;
1784 	case DPLL_CFGCR2_KDIV_3:
1785 		p2 = 3;
1786 		break;
1787 	case DPLL_CFGCR2_KDIV_1:
1788 		p2 = 1;
1789 		break;
1790 	default:
1791 		MISSING_CASE(p2);
1792 		return 0;
1793 	}
1794 
1795 	dco_freq = (hw_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
1796 		   ref_clock;
1797 
1798 	dco_freq += ((hw_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
1799 		    ref_clock / 0x8000;
1800 
1801 	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
1802 		return 0;
1803 
1804 	return dco_freq / (p0 * p1 * p2 * 5);
1805 }
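/*
 * Worked example (illustrative): decoding the state programmed for the
 * 148.5 MHz case above (dco_integer = 371, dco_fraction = 0x2000,
 * p0/p1/p2 = 2/3/2, 24 MHz ref) gives
 * dco_freq = 371 * 24000 + 8192 * 24000 / 0x8000 = 8,910,000 kHz and
 * 8,910,000 / (2 * 3 * 2 * 5) = 148,500 kHz.
 */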
1806 
1807 static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
1808 {
1809 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1810 	struct skl_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.skl;
1811 	struct skl_wrpll_params wrpll_params = {};
1812 	int ret;
1813 
1814 	ret = skl_ddi_calculate_wrpll(crtc_state->port_clock,
1815 				      i915->display.dpll.ref_clks.nssc, &wrpll_params);
1816 	if (ret)
1817 		return ret;
1818 
1819 	/*
1820 	 * See comment in intel_dpll_hw_state to understand why we always use 0
1821 	 * as the DPLL id in this function.
1822 	 */
1823 	hw_state->ctrl1 =
1824 		DPLL_CTRL1_OVERRIDE(0) |
1825 		DPLL_CTRL1_HDMI_MODE(0);
1826 
1827 	hw_state->cfgcr1 =
1828 		DPLL_CFGCR1_FREQ_ENABLE |
1829 		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
1830 		wrpll_params.dco_integer;
1831 
1832 	hw_state->cfgcr2 =
1833 		DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
1834 		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
1835 		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
1836 		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
1837 		wrpll_params.central_freq;
1838 
1839 	crtc_state->port_clock = skl_ddi_wrpll_get_freq(i915, NULL,
1840 							&crtc_state->dpll_hw_state);
1841 
1842 	return 0;
1843 }
1844 
1845 static int
1846 skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1847 {
1848 	struct skl_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.skl;
1849 	u32 ctrl1;
1850 
1851 	/*
1852 	 * See comment in intel_dpll_hw_state to understand why we always use 0
1853 	 * as the DPLL id in this function.
1854 	 */
1855 	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1856 	switch (crtc_state->port_clock / 2) {
1857 	case 81000:
1858 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
1859 		break;
1860 	case 135000:
1861 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
1862 		break;
1863 	case 270000:
1864 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
1865 		break;
1866 		/* eDP 1.4 rates */
1867 	case 162000:
1868 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
1869 		break;
1870 	case 108000:
1871 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
1872 		break;
1873 	case 216000:
1874 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
1875 		break;
1876 	}
1877 
1878 	hw_state->ctrl1 = ctrl1;
1879 
1880 	return 0;
1881 }
1882 
1883 static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1884 				  const struct intel_shared_dpll *pll,
1885 				  const struct intel_dpll_hw_state *dpll_hw_state)
1886 {
1887 	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1888 	int link_clock = 0;
1889 
1890 	switch ((hw_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
1891 		DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
1892 	case DPLL_CTRL1_LINK_RATE_810:
1893 		link_clock = 81000;
1894 		break;
1895 	case DPLL_CTRL1_LINK_RATE_1080:
1896 		link_clock = 108000;
1897 		break;
1898 	case DPLL_CTRL1_LINK_RATE_1350:
1899 		link_clock = 135000;
1900 		break;
1901 	case DPLL_CTRL1_LINK_RATE_1620:
1902 		link_clock = 162000;
1903 		break;
1904 	case DPLL_CTRL1_LINK_RATE_2160:
1905 		link_clock = 216000;
1906 		break;
1907 	case DPLL_CTRL1_LINK_RATE_2700:
1908 		link_clock = 270000;
1909 		break;
1910 	default:
1911 		drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
1912 		break;
1913 	}
1914 
1915 	return link_clock * 2;
1916 }
1917 
1918 static int skl_compute_dpll(struct intel_atomic_state *state,
1919 			    struct intel_crtc *crtc,
1920 			    struct intel_encoder *encoder)
1921 {
1922 	struct intel_crtc_state *crtc_state =
1923 		intel_atomic_get_new_crtc_state(state, crtc);
1924 
1925 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1926 		return skl_ddi_hdmi_pll_dividers(crtc_state);
1927 	else if (intel_crtc_has_dp_encoder(crtc_state))
1928 		return skl_ddi_dp_set_dpll_hw_state(crtc_state);
1929 	else
1930 		return -EINVAL;
1931 }
1932 
1933 static int skl_get_dpll(struct intel_atomic_state *state,
1934 			struct intel_crtc *crtc,
1935 			struct intel_encoder *encoder)
1936 {
1937 	struct intel_crtc_state *crtc_state =
1938 		intel_atomic_get_new_crtc_state(state, crtc);
1939 	struct intel_shared_dpll *pll;
1940 
1941 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1942 		pll = intel_find_shared_dpll(state, crtc,
1943 					     &crtc_state->dpll_hw_state,
1944 					     BIT(DPLL_ID_SKL_DPLL0));
1945 	else
1946 		pll = intel_find_shared_dpll(state, crtc,
1947 					     &crtc_state->dpll_hw_state,
1948 					     BIT(DPLL_ID_SKL_DPLL3) |
1949 					     BIT(DPLL_ID_SKL_DPLL2) |
1950 					     BIT(DPLL_ID_SKL_DPLL1));
1951 	if (!pll)
1952 		return -EINVAL;
1953 
1954 	intel_reference_shared_dpll(state, crtc,
1955 				    pll, &crtc_state->dpll_hw_state);
1956 
1957 	crtc_state->shared_dpll = pll;
1958 
1959 	return 0;
1960 }
1961 
1962 static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
1963 				const struct intel_shared_dpll *pll,
1964 				const struct intel_dpll_hw_state *dpll_hw_state)
1965 {
1966 	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1967 
1968 	/*
1969 	 * ctrl1 register is already shifted for each pll, just use 0 to get
1970 	 * the internal shift for each field
1971 	 */
1972 	if (hw_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
1973 		return skl_ddi_wrpll_get_freq(i915, pll, dpll_hw_state);
1974 	else
1975 		return skl_ddi_lcpll_get_freq(i915, pll, dpll_hw_state);
1976 }
1977 
1978 static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
1979 {
1980 	/* No SSC ref */
1981 	i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
1982 }
1983 
1984 static void skl_dump_hw_state(struct drm_printer *p,
1985 			      const struct intel_dpll_hw_state *dpll_hw_state)
1986 {
1987 	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1988 
1989 	drm_printf(p, "dpll_hw_state: ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
1990 		   hw_state->ctrl1, hw_state->cfgcr1, hw_state->cfgcr2);
1991 }
1992 
1993 static bool skl_compare_hw_state(const struct intel_dpll_hw_state *_a,
1994 				 const struct intel_dpll_hw_state *_b)
1995 {
1996 	const struct skl_dpll_hw_state *a = &_a->skl;
1997 	const struct skl_dpll_hw_state *b = &_b->skl;
1998 
1999 	return a->ctrl1 == b->ctrl1 &&
2000 		a->cfgcr1 == b->cfgcr1 &&
2001 		a->cfgcr2 == b->cfgcr2;
2002 }
2003 
2004 static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
2005 	.enable = skl_ddi_pll_enable,
2006 	.disable = skl_ddi_pll_disable,
2007 	.get_hw_state = skl_ddi_pll_get_hw_state,
2008 	.get_freq = skl_ddi_pll_get_freq,
2009 };
2010 
2011 static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
2012 	.enable = skl_ddi_dpll0_enable,
2013 	.disable = skl_ddi_dpll0_disable,
2014 	.get_hw_state = skl_ddi_dpll0_get_hw_state,
2015 	.get_freq = skl_ddi_pll_get_freq,
2016 };
2017 
2018 static const struct dpll_info skl_plls[] = {
2019 	{ .name = "DPLL 0", .funcs = &skl_ddi_dpll0_funcs, .id = DPLL_ID_SKL_DPLL0,
2020 	  .always_on = true, },
2021 	{ .name = "DPLL 1", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL1, },
2022 	{ .name = "DPLL 2", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL2, },
2023 	{ .name = "DPLL 3", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL3, },
2024 	{}
2025 };
2026 
2027 static const struct intel_dpll_mgr skl_pll_mgr = {
2028 	.dpll_info = skl_plls,
2029 	.compute_dplls = skl_compute_dpll,
2030 	.get_dplls = skl_get_dpll,
2031 	.put_dplls = intel_put_dpll,
2032 	.update_ref_clks = skl_update_dpll_ref_clks,
2033 	.dump_hw_state = skl_dump_hw_state,
2034 	.compare_hw_state = skl_compare_hw_state,
2035 };
2036 
2037 static void bxt_ddi_pll_enable(struct drm_i915_private *i915,
2038 			       struct intel_shared_dpll *pll,
2039 			       const struct intel_dpll_hw_state *dpll_hw_state)
2040 {
2041 	struct intel_display *display = &i915->display;
2042 	const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
2043 	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
2044 	enum dpio_phy phy;
2045 	enum dpio_channel ch;
2046 	u32 temp;
2047 
2048 	bxt_port_to_phy_channel(display, port, &phy, &ch);
2049 
2050 	/* Non-SSC reference */
2051 	intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_REF_SEL);
2052 
2053 	if (IS_GEMINILAKE(i915)) {
2054 		intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port),
2055 			     0, PORT_PLL_POWER_ENABLE);
2056 
2057 		if (wait_for_us((intel_de_read(i915, BXT_PORT_PLL_ENABLE(port)) &
2058 				 PORT_PLL_POWER_STATE), 200))
2059 			drm_err(&i915->drm,
2060 				"Power state not set for PLL:%d\n", port);
2061 	}
2062 
2063 	/* Disable 10 bit clock */
2064 	intel_de_rmw(i915, BXT_PORT_PLL_EBB_4(phy, ch),
2065 		     PORT_PLL_10BIT_CLK_ENABLE, 0);
2066 
2067 	/* Write P1 & P2 */
2068 	intel_de_rmw(i915, BXT_PORT_PLL_EBB_0(phy, ch),
2069 		     PORT_PLL_P1_MASK | PORT_PLL_P2_MASK, hw_state->ebb0);
2070 
2071 	/* Write M2 integer */
2072 	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 0),
2073 		     PORT_PLL_M2_INT_MASK, hw_state->pll0);
2074 
2075 	/* Write N */
2076 	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 1),
2077 		     PORT_PLL_N_MASK, hw_state->pll1);
2078 
2079 	/* Write M2 fraction */
2080 	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 2),
2081 		     PORT_PLL_M2_FRAC_MASK, hw_state->pll2);
2082 
2083 	/* Write M2 fraction enable */
2084 	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 3),
2085 		     PORT_PLL_M2_FRAC_ENABLE, hw_state->pll3);
2086 
2087 	/* Write coeff */
2088 	temp = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 6));
2089 	temp &= ~PORT_PLL_PROP_COEFF_MASK;
2090 	temp &= ~PORT_PLL_INT_COEFF_MASK;
2091 	temp &= ~PORT_PLL_GAIN_CTL_MASK;
2092 	temp |= hw_state->pll6;
2093 	intel_de_write(i915, BXT_PORT_PLL(phy, ch, 6), temp);
2094 
2095 	/* Write calibration val */
2096 	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 8),
2097 		     PORT_PLL_TARGET_CNT_MASK, hw_state->pll8);
2098 
2099 	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 9),
2100 		     PORT_PLL_LOCK_THRESHOLD_MASK, hw_state->pll9);
2101 
2102 	temp = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 10));
2103 	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
2104 	temp &= ~PORT_PLL_DCO_AMP_MASK;
2105 	temp |= hw_state->pll10;
2106 	intel_de_write(i915, BXT_PORT_PLL(phy, ch, 10), temp);
2107 
2108 	/* Recalibrate with new settings */
2109 	temp = intel_de_read(i915, BXT_PORT_PLL_EBB_4(phy, ch));
2110 	temp |= PORT_PLL_RECALIBRATE;
2111 	intel_de_write(i915, BXT_PORT_PLL_EBB_4(phy, ch), temp);
2112 	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
2113 	temp |= hw_state->ebb4;
2114 	intel_de_write(i915, BXT_PORT_PLL_EBB_4(phy, ch), temp);
2115 
2116 	/* Enable PLL */
2117 	intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_ENABLE);
2118 	intel_de_posting_read(i915, BXT_PORT_PLL_ENABLE(port));
2119 
2120 	if (wait_for_us((intel_de_read(i915, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
2121 			200))
2122 		drm_err(&i915->drm, "PLL %d not locked\n", port);
2123 
2124 	if (IS_GEMINILAKE(i915)) {
2125 		temp = intel_de_read(i915, BXT_PORT_TX_DW5_LN(phy, ch, 0));
2126 		temp |= DCC_DELAY_RANGE_2;
2127 		intel_de_write(i915, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
2128 	}
2129 
2130 	/*
2131 	 * While we write to the group register to program all lanes at once we
2132 	 * can read only lane registers and we pick lanes 0/1 for that.
2133 	 */
2134 	temp = intel_de_read(i915, BXT_PORT_PCS_DW12_LN01(phy, ch));
2135 	temp &= ~LANE_STAGGER_MASK;
2136 	temp &= ~LANESTAGGER_STRAP_OVRD;
2137 	temp |= hw_state->pcsdw12;
2138 	intel_de_write(i915, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
2139 }
2140 
2141 static void bxt_ddi_pll_disable(struct drm_i915_private *i915,
2142 				struct intel_shared_dpll *pll)
2143 {
2144 	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
2145 
2146 	intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), PORT_PLL_ENABLE, 0);
2147 	intel_de_posting_read(i915, BXT_PORT_PLL_ENABLE(port));
2148 
2149 	if (IS_GEMINILAKE(i915)) {
2150 		intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port),
2151 			     PORT_PLL_POWER_ENABLE, 0);
2152 
2153 		if (wait_for_us(!(intel_de_read(i915, BXT_PORT_PLL_ENABLE(port)) &
2154 				  PORT_PLL_POWER_STATE), 200))
2155 			drm_err(&i915->drm,
2156 				"Power state not reset for PLL:%d\n", port);
2157 	}
2158 }
2159 
2160 static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *i915,
2161 				     struct intel_shared_dpll *pll,
2162 				     struct intel_dpll_hw_state *dpll_hw_state)
2163 {
2164 	struct intel_display *display = &i915->display;
2165 	struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
2166 	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
2167 	intel_wakeref_t wakeref;
2168 	enum dpio_phy phy;
2169 	enum dpio_channel ch;
2170 	u32 val;
2171 	bool ret;
2172 
2173 	bxt_port_to_phy_channel(display, port, &phy, &ch);
2174 
2175 	wakeref = intel_display_power_get_if_enabled(i915,
2176 						     POWER_DOMAIN_DISPLAY_CORE);
2177 	if (!wakeref)
2178 		return false;
2179 
2180 	ret = false;
2181 
2182 	val = intel_de_read(i915, BXT_PORT_PLL_ENABLE(port));
2183 	if (!(val & PORT_PLL_ENABLE))
2184 		goto out;
2185 
2186 	hw_state->ebb0 = intel_de_read(i915, BXT_PORT_PLL_EBB_0(phy, ch));
2187 	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
2188 
2189 	hw_state->ebb4 = intel_de_read(i915, BXT_PORT_PLL_EBB_4(phy, ch));
2190 	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
2191 
2192 	hw_state->pll0 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 0));
2193 	hw_state->pll0 &= PORT_PLL_M2_INT_MASK;
2194 
2195 	hw_state->pll1 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 1));
2196 	hw_state->pll1 &= PORT_PLL_N_MASK;
2197 
2198 	hw_state->pll2 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 2));
2199 	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
2200 
2201 	hw_state->pll3 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 3));
2202 	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
2203 
2204 	hw_state->pll6 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 6));
2205 	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
2206 			  PORT_PLL_INT_COEFF_MASK |
2207 			  PORT_PLL_GAIN_CTL_MASK;
2208 
2209 	hw_state->pll8 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 8));
2210 	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
2211 
2212 	hw_state->pll9 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 9));
2213 	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
2214 
2215 	hw_state->pll10 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 10));
2216 	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
2217 			   PORT_PLL_DCO_AMP_MASK;
2218 
2219 	/*
2220 	 * While we write to the group register to program all lanes at once we
2221 	 * can read only lane registers. We configure all lanes the same way, so
2222 	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
2223 	 */
2224 	hw_state->pcsdw12 = intel_de_read(i915,
2225 					  BXT_PORT_PCS_DW12_LN01(phy, ch));
2226 	if (intel_de_read(i915, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
2227 		drm_dbg(&i915->drm,
2228 			"lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
2229 			hw_state->pcsdw12,
2230 			intel_de_read(i915,
2231 				      BXT_PORT_PCS_DW12_LN23(phy, ch)));
2232 	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
2233 
2234 	ret = true;
2235 
2236 out:
2237 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
2238 
2239 	return ret;
2240 }
2241 
2242 /* pre-calculated values for DP linkrates */
2243 static const struct dpll bxt_dp_clk_val[] = {
2244 	/* m2 is .22 binary fixed point */
2245 	{ .dot = 162000, .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2246 	{ .dot = 270000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
2247 	{ .dot = 540000, .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
2248 	{ .dot = 216000, .p1 = 3, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2249 	{ .dot = 243000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6133333 /* 24.3 */ },
2250 	{ .dot = 324000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2251 	{ .dot = 432000, .p1 = 3, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2252 };
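/*
 * For reference (illustrative): the .22 fixed-point m2 values above are the
 * feedback ratio scaled by 2^22, e.g. 32.4 * 4194304 ~= 0x819999a (135,895,450)
 * and 27.0 * 4194304 = 0x6c00000 (113,246,208).
 */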
2253 
2254 static int
2255 bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
2256 			  struct dpll *clk_div)
2257 {
2258 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2259 
2260 	/* Calculate HDMI div */
2261 	/*
2262 	 * FIXME: tie the following calculation into
2263 	 * i9xx_crtc_compute_clock
2264 	 */
2265 	if (!bxt_find_best_dpll(crtc_state, clk_div))
2266 		return -EINVAL;
2267 
2268 	drm_WARN_ON(&i915->drm, clk_div->m1 != 2);
2269 
2270 	return 0;
2271 }
2272 
2273 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
2274 				    struct dpll *clk_div)
2275 {
2276 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2277 	int i;
2278 
2279 	*clk_div = bxt_dp_clk_val[0];
2280 	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
2281 		if (crtc_state->port_clock == bxt_dp_clk_val[i].dot) {
2282 			*clk_div = bxt_dp_clk_val[i];
2283 			break;
2284 		}
2285 	}
2286 
2287 	chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, clk_div);
2288 
2289 	drm_WARN_ON(&i915->drm, clk_div->vco == 0 ||
2290 		    clk_div->dot != crtc_state->port_clock);
2291 }
2292 
2293 static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
2294 				     const struct dpll *clk_div)
2295 {
2296 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2297 	struct bxt_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.bxt;
2298 	int clock = crtc_state->port_clock;
2299 	int vco = clk_div->vco;
2300 	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
2301 	u32 lanestagger;
2302 
2303 	if (vco >= 6200000 && vco <= 6700000) {
2304 		prop_coef = 4;
2305 		int_coef = 9;
2306 		gain_ctl = 3;
2307 		targ_cnt = 8;
2308 	} else if ((vco > 5400000 && vco < 6200000) ||
2309 			(vco >= 4800000 && vco < 5400000)) {
2310 		prop_coef = 5;
2311 		int_coef = 11;
2312 		gain_ctl = 3;
2313 		targ_cnt = 9;
2314 	} else if (vco == 5400000) {
2315 		prop_coef = 3;
2316 		int_coef = 8;
2317 		gain_ctl = 1;
2318 		targ_cnt = 9;
2319 	} else {
2320 		drm_err(&i915->drm, "Invalid VCO\n");
2321 		return -EINVAL;
2322 	}
2323 
2324 	if (clock > 270000)
2325 		lanestagger = 0x18;
2326 	else if (clock > 135000)
2327 		lanestagger = 0x0d;
2328 	else if (clock > 67000)
2329 		lanestagger = 0x07;
2330 	else if (clock > 33000)
2331 		lanestagger = 0x04;
2332 	else
2333 		lanestagger = 0x02;
2334 
2335 	hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
2336 	hw_state->pll0 = PORT_PLL_M2_INT(clk_div->m2 >> 22);
2337 	hw_state->pll1 = PORT_PLL_N(clk_div->n);
2338 	hw_state->pll2 = PORT_PLL_M2_FRAC(clk_div->m2 & 0x3fffff);
2339 
2340 	if (clk_div->m2 & 0x3fffff)
2341 		hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
2342 
2343 	hw_state->pll6 = PORT_PLL_PROP_COEFF(prop_coef) |
2344 		PORT_PLL_INT_COEFF(int_coef) |
2345 		PORT_PLL_GAIN_CTL(gain_ctl);
2346 
2347 	hw_state->pll8 = PORT_PLL_TARGET_CNT(targ_cnt);
2348 
2349 	hw_state->pll9 = PORT_PLL_LOCK_THRESHOLD(5);
2350 
2351 	hw_state->pll10 = PORT_PLL_DCO_AMP(15) |
2352 		PORT_PLL_DCO_AMP_OVR_EN_H;
2353 
2354 	hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
2355 
2356 	hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
2357 
2358 	return 0;
2359 }
2360 
2361 static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
2362 				const struct intel_shared_dpll *pll,
2363 				const struct intel_dpll_hw_state *dpll_hw_state)
2364 {
2365 	const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
2366 	struct dpll clock;
2367 
2368 	clock.m1 = 2;
2369 	clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, hw_state->pll0) << 22;
2370 	if (hw_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
2371 		clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK,
2372 					  hw_state->pll2);
2373 	clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, hw_state->pll1);
2374 	clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, hw_state->ebb0);
2375 	clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, hw_state->ebb0);
2376 
2377 	return chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, &clock);
2378 }
2379 
2380 static int
2381 bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2382 {
2383 	struct dpll clk_div = {};
2384 
2385 	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
2386 
2387 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2388 }
2389 
2390 static int
2391 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2392 {
2393 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2394 	struct dpll clk_div = {};
2395 	int ret;
2396 
2397 	bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
2398 
2399 	ret = bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2400 	if (ret)
2401 		return ret;
2402 
2403 	crtc_state->port_clock = bxt_ddi_pll_get_freq(i915, NULL,
2404 						      &crtc_state->dpll_hw_state);
2405 
2406 	return 0;
2407 }
2408 
2409 static int bxt_compute_dpll(struct intel_atomic_state *state,
2410 			    struct intel_crtc *crtc,
2411 			    struct intel_encoder *encoder)
2412 {
2413 	struct intel_crtc_state *crtc_state =
2414 		intel_atomic_get_new_crtc_state(state, crtc);
2415 
2416 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
2417 		return bxt_ddi_hdmi_set_dpll_hw_state(crtc_state);
2418 	else if (intel_crtc_has_dp_encoder(crtc_state))
2419 		return bxt_ddi_dp_set_dpll_hw_state(crtc_state);
2420 	else
2421 		return -EINVAL;
2422 }
2423 
2424 static int bxt_get_dpll(struct intel_atomic_state *state,
2425 			struct intel_crtc *crtc,
2426 			struct intel_encoder *encoder)
2427 {
2428 	struct intel_crtc_state *crtc_state =
2429 		intel_atomic_get_new_crtc_state(state, crtc);
2430 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2431 	struct intel_shared_dpll *pll;
2432 	enum intel_dpll_id id;
2433 
2434 	/* 1:1 mapping between ports and PLLs */
2435 	id = (enum intel_dpll_id) encoder->port;
2436 	pll = intel_get_shared_dpll_by_id(i915, id);
2437 
2438 	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
2439 		    crtc->base.base.id, crtc->base.name, pll->info->name);
2440 
2441 	intel_reference_shared_dpll(state, crtc,
2442 				    pll, &crtc_state->dpll_hw_state);
2443 
2444 	crtc_state->shared_dpll = pll;
2445 
2446 	return 0;
2447 }
2448 
2449 static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
2450 {
2451 	i915->display.dpll.ref_clks.ssc = 100000;
2452 	i915->display.dpll.ref_clks.nssc = 100000;
2453 	/* DSI non-SSC ref 19.2MHz */
2454 }
2455 
2456 static void bxt_dump_hw_state(struct drm_printer *p,
2457 			      const struct intel_dpll_hw_state *dpll_hw_state)
2458 {
2459 	const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
2460 
2461 	drm_printf(p, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x, "
2462 		   "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
2463 		   "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
2464 		   hw_state->ebb0, hw_state->ebb4,
2465 		   hw_state->pll0, hw_state->pll1, hw_state->pll2, hw_state->pll3,
2466 		   hw_state->pll6, hw_state->pll8, hw_state->pll9, hw_state->pll10,
2467 		   hw_state->pcsdw12);
2468 }
2469 
2470 static bool bxt_compare_hw_state(const struct intel_dpll_hw_state *_a,
2471 				 const struct intel_dpll_hw_state *_b)
2472 {
2473 	const struct bxt_dpll_hw_state *a = &_a->bxt;
2474 	const struct bxt_dpll_hw_state *b = &_b->bxt;
2475 
2476 	return a->ebb0 == b->ebb0 &&
2477 		a->ebb4 == b->ebb4 &&
2478 		a->pll0 == b->pll0 &&
2479 		a->pll1 == b->pll1 &&
2480 		a->pll2 == b->pll2 &&
2481 		a->pll3 == b->pll3 &&
2482 		a->pll6 == b->pll6 &&
2483 		a->pll8 == b->pll8 &&
		a->pll9 == b->pll9 &&
2484 		a->pll10 == b->pll10 &&
2485 		a->pcsdw12 == b->pcsdw12;
2486 }
2487 
2488 static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
2489 	.enable = bxt_ddi_pll_enable,
2490 	.disable = bxt_ddi_pll_disable,
2491 	.get_hw_state = bxt_ddi_pll_get_hw_state,
2492 	.get_freq = bxt_ddi_pll_get_freq,
2493 };
2494 
2495 static const struct dpll_info bxt_plls[] = {
2496 	{ .name = "PORT PLL A", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL0, },
2497 	{ .name = "PORT PLL B", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL1, },
2498 	{ .name = "PORT PLL C", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL2, },
2499 	{}
2500 };
2501 
2502 static const struct intel_dpll_mgr bxt_pll_mgr = {
2503 	.dpll_info = bxt_plls,
2504 	.compute_dplls = bxt_compute_dpll,
2505 	.get_dplls = bxt_get_dpll,
2506 	.put_dplls = intel_put_dpll,
2507 	.update_ref_clks = bxt_update_dpll_ref_clks,
2508 	.dump_hw_state = bxt_dump_hw_state,
2509 	.compare_hw_state = bxt_compare_hw_state,
2510 };
2511 
2512 static void icl_wrpll_get_multipliers(int bestdiv, int *pdiv,
2513 				      int *qdiv, int *kdiv)
2514 {
2515 	/* even dividers */
2516 	if (bestdiv % 2 == 0) {
2517 		if (bestdiv == 2) {
2518 			*pdiv = 2;
2519 			*qdiv = 1;
2520 			*kdiv = 1;
2521 		} else if (bestdiv % 4 == 0) {
2522 			*pdiv = 2;
2523 			*qdiv = bestdiv / 4;
2524 			*kdiv = 2;
2525 		} else if (bestdiv % 6 == 0) {
2526 			*pdiv = 3;
2527 			*qdiv = bestdiv / 6;
2528 			*kdiv = 2;
2529 		} else if (bestdiv % 5 == 0) {
2530 			*pdiv = 5;
2531 			*qdiv = bestdiv / 10;
2532 			*kdiv = 2;
2533 		} else if (bestdiv % 14 == 0) {
2534 			*pdiv = 7;
2535 			*qdiv = bestdiv / 14;
2536 			*kdiv = 2;
2537 		}
2538 	} else {
2539 		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
2540 			*pdiv = bestdiv;
2541 			*qdiv = 1;
2542 			*kdiv = 1;
2543 		} else { /* 9, 15, 21 */
2544 			*pdiv = bestdiv / 3;
2545 			*qdiv = 1;
2546 			*kdiv = 3;
2547 		}
2548 	}
2549 }
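/*
 * Worked example (illustrative): bestdiv = 12 is even and divisible by 4,
 * so pdiv = 2, qdiv = 3, kdiv = 2 and pdiv * qdiv * kdiv == 12.
 */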
2550 
2551 static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
2552 				      u32 dco_freq, u32 ref_freq,
2553 				      int pdiv, int qdiv, int kdiv)
2554 {
2555 	u32 dco;
2556 
2557 	switch (kdiv) {
2558 	case 1:
2559 		params->kdiv = 1;
2560 		break;
2561 	case 2:
2562 		params->kdiv = 2;
2563 		break;
2564 	case 3:
2565 		params->kdiv = 4;
2566 		break;
2567 	default:
2568 		WARN(1, "Incorrect KDiv\n");
2569 	}
2570 
2571 	switch (pdiv) {
2572 	case 2:
2573 		params->pdiv = 1;
2574 		break;
2575 	case 3:
2576 		params->pdiv = 2;
2577 		break;
2578 	case 5:
2579 		params->pdiv = 4;
2580 		break;
2581 	case 7:
2582 		params->pdiv = 8;
2583 		break;
2584 	default:
2585 		WARN(1, "Incorrect PDiv\n");
2586 	}
2587 
2588 	WARN_ON(kdiv != 2 && qdiv != 1);
2589 
2590 	params->qdiv_ratio = qdiv;
2591 	params->qdiv_mode = (qdiv == 1) ? 0 : 1;
2592 
2593 	dco = div_u64((u64)dco_freq << 15, ref_freq);
2594 
2595 	params->dco_integer = dco >> 15;
2596 	params->dco_fraction = dco & 0x7fff;
2597 }
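/*
 * Worked example (illustrative): dco_freq = 8,100,000 kHz with a 19.2 MHz
 * reference gives dco = (8,100,000 << 15) / 19,200 = 13,824,000, i.e.
 * dco_integer = 0x1A5 (421) and dco_fraction = 0x7000 (28,672), matching
 * the 8.1 GHz entry in icl_dp_combo_pll_19_2MHz_values below.
 */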
2598 
2599 /*
2600  * Display WA #22010492432: ehl, tgl, adl-s, adl-p
2601  * Program half of the nominal DCO divider fraction value.
2602  */
2603 static bool
2604 ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
2605 {
2606 	return ((IS_ELKHARTLAKE(i915) &&
2607 		 IS_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
2608 		 IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) &&
2609 		 i915->display.dpll.ref_clks.nssc == 38400;
2610 }
2611 
2612 struct icl_combo_pll_params {
2613 	int clock;
2614 	struct skl_wrpll_params wrpll;
2615 };
2616 
2617 /*
2618  * These values are already adjusted: they're the bits we write to the
2619  * registers, not the logical values.
2620  */
2621 static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
2622 	{ 540000,
2623 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
2624 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2625 	{ 270000,
2626 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
2627 	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2628 	{ 162000,
2629 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
2630 	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2631 	{ 324000,
2632 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
2633 	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2634 	{ 216000,
2635 	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2636 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2637 	{ 432000,
2638 	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2639 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2640 	{ 648000,
2641 	  { .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
2642 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2643 	{ 810000,
2644 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
2645 	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2646 };
2647 
2648 
2649 /* Also used for 38.4 MHz values. */
2650 static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
2651 	{ 540000,
2652 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
2653 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2654 	{ 270000,
2655 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
2656 	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2657 	{ 162000,
2658 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
2659 	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2660 	{ 324000,
2661 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
2662 	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2663 	{ 216000,
2664 	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2665 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2666 	{ 432000,
2667 	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2668 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2669 	{ 648000,
2670 	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
2671 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2672 	{ 810000,
2673 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
2674 	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2675 };
2676 
2677 static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
2678 	.dco_integer = 0x151, .dco_fraction = 0x4000,
2679 	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2680 };
2681 
2682 static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
2683 	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
2684 	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2685 };
2686 
2687 static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
2688 	.dco_integer = 0x54, .dco_fraction = 0x3000,
2689 	/* the following params are unused */
2690 	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
2691 };
2692 
2693 static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
2694 	.dco_integer = 0x43, .dco_fraction = 0x4000,
2695 	/* the following params are unused */
2696 };
2697 
2698 static int icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2699 				 struct skl_wrpll_params *pll_params)
2700 {
2701 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2702 	const struct icl_combo_pll_params *params =
2703 		i915->display.dpll.ref_clks.nssc == 24000 ?
2704 		icl_dp_combo_pll_24MHz_values :
2705 		icl_dp_combo_pll_19_2MHz_values;
2706 	int clock = crtc_state->port_clock;
2707 	int i;
2708 
2709 	for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
2710 		if (clock == params[i].clock) {
2711 			*pll_params = params[i].wrpll;
2712 			return 0;
2713 		}
2714 	}
2715 
2716 	MISSING_CASE(clock);
2717 	return -EINVAL;
2718 }
2719 
2720 static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
2721 			    struct skl_wrpll_params *pll_params)
2722 {
2723 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2724 
2725 	if (DISPLAY_VER(i915) >= 12) {
2726 		switch (i915->display.dpll.ref_clks.nssc) {
2727 		default:
2728 			MISSING_CASE(i915->display.dpll.ref_clks.nssc);
2729 			fallthrough;
2730 		case 19200:
2731 		case 38400:
2732 			*pll_params = tgl_tbt_pll_19_2MHz_values;
2733 			break;
2734 		case 24000:
2735 			*pll_params = tgl_tbt_pll_24MHz_values;
2736 			break;
2737 		}
2738 	} else {
2739 		switch (i915->display.dpll.ref_clks.nssc) {
2740 		default:
2741 			MISSING_CASE(i915->display.dpll.ref_clks.nssc);
2742 			fallthrough;
2743 		case 19200:
2744 		case 38400:
2745 			*pll_params = icl_tbt_pll_19_2MHz_values;
2746 			break;
2747 		case 24000:
2748 			*pll_params = icl_tbt_pll_24MHz_values;
2749 			break;
2750 		}
2751 	}
2752 
2753 	return 0;
2754 }
2755 
2756 static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
2757 				    const struct intel_shared_dpll *pll,
2758 				    const struct intel_dpll_hw_state *dpll_hw_state)
2759 {
2760 	/*
2761 	 * The PLL outputs multiple frequencies at the same time, selection is
2762 	 * made at DDI clock mux level.
2763 	 */
2764 	drm_WARN_ON(&i915->drm, 1);
2765 
2766 	return 0;
2767 }
2768 
2769 static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
2770 {
2771 	int ref_clock = i915->display.dpll.ref_clks.nssc;
2772 
2773 	/*
2774 	 * For ICL+, the spec states: if reference frequency is 38.4,
2775 	 * use 19.2 because the DPLL automatically divides that by 2.
2776 	 */
2777 	if (ref_clock == 38400)
2778 		ref_clock = 19200;
2779 
2780 	return ref_clock;
2781 }
2782 
2783 static int
2784 icl_calc_wrpll(struct intel_crtc_state *crtc_state,
2785 	       struct skl_wrpll_params *wrpll_params)
2786 {
2787 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2788 	int ref_clock = icl_wrpll_ref_clock(i915);
2789 	u32 afe_clock = crtc_state->port_clock * 5;
2790 	u32 dco_min = 7998000;
2791 	u32 dco_max = 10000000;
2792 	u32 dco_mid = (dco_min + dco_max) / 2;
2793 	static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
2794 					 18, 20, 24, 28, 30, 32,  36,  40,
2795 					 42, 44, 48, 50, 52, 54,  56,  60,
2796 					 64, 66, 68, 70, 72, 76,  78,  80,
2797 					 84, 88, 90, 92, 96, 98, 100, 102,
2798 					  3,  5,  7,  9, 15, 21 };
2799 	u32 dco, best_dco = 0, dco_centrality = 0;
2800 	u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2801 	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2802 
2803 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2804 		dco = afe_clock * dividers[d];
2805 
2806 		if (dco <= dco_max && dco >= dco_min) {
2807 			dco_centrality = abs(dco - dco_mid);
2808 
2809 			if (dco_centrality < best_dco_centrality) {
2810 				best_dco_centrality = dco_centrality;
2811 				best_div = dividers[d];
2812 				best_dco = dco;
2813 			}
2814 		}
2815 	}
2816 
2817 	if (best_div == 0)
2818 		return -EINVAL;
2819 
2820 	icl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2821 	icl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
2822 				  pdiv, qdiv, kdiv);
2823 
2824 	return 0;
2825 }
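/*
 * Worked example (illustrative): for a 148.5 MHz HDMI pixel clock,
 * afe_clock = 742,500 kHz and only divider 12 lands in the DCO range
 * (dco = 8,910,000 kHz, centrality = 89,000), so best_div = 12 and the
 * multipliers come out as pdiv/qdiv/kdiv = 2/3/2.
 */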
2826 
2827 static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
2828 				      const struct intel_shared_dpll *pll,
2829 				      const struct intel_dpll_hw_state *dpll_hw_state)
2830 {
2831 	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
2832 	int ref_clock = icl_wrpll_ref_clock(i915);
2833 	u32 dco_fraction;
2834 	u32 p0, p1, p2, dco_freq;
2835 
2836 	p0 = hw_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
2837 	p2 = hw_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
2838 
2839 	if (hw_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
2840 		p1 = (hw_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
2841 			DPLL_CFGCR1_QDIV_RATIO_SHIFT;
2842 	else
2843 		p1 = 1;
2844 
2845 	switch (p0) {
2846 	case DPLL_CFGCR1_PDIV_2:
2847 		p0 = 2;
2848 		break;
2849 	case DPLL_CFGCR1_PDIV_3:
2850 		p0 = 3;
2851 		break;
2852 	case DPLL_CFGCR1_PDIV_5:
2853 		p0 = 5;
2854 		break;
2855 	case DPLL_CFGCR1_PDIV_7:
2856 		p0 = 7;
2857 		break;
2858 	}
2859 
2860 	switch (p2) {
2861 	case DPLL_CFGCR1_KDIV_1:
2862 		p2 = 1;
2863 		break;
2864 	case DPLL_CFGCR1_KDIV_2:
2865 		p2 = 2;
2866 		break;
2867 	case DPLL_CFGCR1_KDIV_3:
2868 		p2 = 3;
2869 		break;
2870 	}
2871 
2872 	dco_freq = (hw_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
2873 		   ref_clock;
2874 
2875 	dco_fraction = (hw_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
2876 		       DPLL_CFGCR0_DCO_FRACTION_SHIFT;
2877 
2878 	if (ehl_combo_pll_div_frac_wa_needed(i915))
2879 		dco_fraction *= 2;
2880 
2881 	dco_freq += (dco_fraction * ref_clock) / 0x8000;
2882 
2883 	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
2884 		return 0;
2885 
2886 	return dco_freq / (p0 * p1 * p2 * 5);
2887 }
2888 
2889 static void icl_calc_dpll_state(struct drm_i915_private *i915,
2890 				const struct skl_wrpll_params *pll_params,
2891 				struct intel_dpll_hw_state *dpll_hw_state)
2892 {
2893 	struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
2894 	u32 dco_fraction = pll_params->dco_fraction;
2895 
2896 	if (ehl_combo_pll_div_frac_wa_needed(i915))
2897 		dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);
2898 
2899 	hw_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
2900 			    pll_params->dco_integer;
2901 
2902 	hw_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
2903 			    DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
2904 			    DPLL_CFGCR1_KDIV(pll_params->kdiv) |
2905 			    DPLL_CFGCR1_PDIV(pll_params->pdiv);
2906 
2907 	if (DISPLAY_VER(i915) >= 12)
2908 		hw_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
2909 	else
2910 		hw_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
2911 
2912 	if (i915->display.vbt.override_afc_startup)
2913 		hw_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->display.vbt.override_afc_startup_val);
2914 }
2915 
2916 static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
2917 				    u32 *target_dco_khz,
2918 				    struct icl_dpll_hw_state *hw_state,
2919 				    bool is_dkl)
2920 {
2921 	static const u8 div1_vals[] = { 7, 5, 3, 2 };
2922 	u32 dco_min_freq, dco_max_freq;
2923 	unsigned int i;
2924 	int div2;
2925 
2926 	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
2927 	dco_max_freq = is_dp ? 8100000 : 10000000;
2928 
2929 	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
2930 		int div1 = div1_vals[i];
2931 
2932 		for (div2 = 10; div2 > 0; div2--) {
2933 			int dco = div1 * div2 * clock_khz * 5;
2934 			int a_divratio, tlinedrv, inputsel;
2935 			u32 hsdiv;
2936 
2937 			if (dco < dco_min_freq || dco > dco_max_freq)
2938 				continue;
2939 
2940 			if (div2 >= 2) {
2941 				/*
2942 				 * Note: a_divratio not matching TGL BSpec
2943 				 * algorithm but matching hardcoded values and
2944 				 * working on HW for DP alt-mode at least
2945 				 */
2946 				a_divratio = is_dp ? 10 : 5;
2947 				tlinedrv = is_dkl ? 1 : 2;
2948 			} else {
2949 				a_divratio = 5;
2950 				tlinedrv = 0;
2951 			}
2952 			inputsel = is_dp ? 0 : 1;
2953 
2954 			switch (div1) {
2955 			default:
2956 				MISSING_CASE(div1);
2957 				fallthrough;
2958 			case 2:
2959 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
2960 				break;
2961 			case 3:
2962 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
2963 				break;
2964 			case 5:
2965 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
2966 				break;
2967 			case 7:
2968 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
2969 				break;
2970 			}
2971 
2972 			*target_dco_khz = dco;
2973 
2974 			hw_state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
2975 
2976 			hw_state->mg_clktop2_coreclkctl1 =
2977 				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
2978 
2979 			hw_state->mg_clktop2_hsclkctl =
2980 				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
2981 				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
2982 				hsdiv |
2983 				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
2984 
2985 			return 0;
2986 		}
2987 	}
2988 
2989 	return -EINVAL;
2990 }
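/*
 * Worked example (illustrative): for DP at 2.7 GHz (clock_khz = 270,000,
 * is_dp), the target DCO must be exactly 8,100,000 kHz; the search settles
 * on div1 = 3, div2 = 2 since 3 * 2 * 270,000 * 5 == 8,100,000.
 */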
2991 
2992 /*
2993  * The specification for this function uses real numbers, so the math had to be
2994  * adapted to integer-only calculation, that's why it looks so different.
2995  */
2996 static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
2997 				 struct intel_dpll_hw_state *dpll_hw_state)
2998 {
2999 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
3000 	struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3001 	int refclk_khz = i915->display.dpll.ref_clks.nssc;
3002 	int clock = crtc_state->port_clock;
3003 	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
3004 	u32 iref_ndiv, iref_trim, iref_pulse_w;
3005 	u32 prop_coeff, int_coeff;
3006 	u32 tdc_targetcnt, feedfwgain;
3007 	u64 ssc_stepsize, ssc_steplen, ssc_steplog;
3008 	u64 tmp;
3009 	bool use_ssc = false;
3010 	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
3011 	bool is_dkl = DISPLAY_VER(i915) >= 12;
3012 	int ret;
3013 
3014 	ret = icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
3015 				       hw_state, is_dkl);
3016 	if (ret)
3017 		return ret;
3018 
3019 	m1div = 2;
3020 	m2div_int = dco_khz / (refclk_khz * m1div);
3021 	if (m2div_int > 255) {
3022 		if (!is_dkl) {
3023 			m1div = 4;
3024 			m2div_int = dco_khz / (refclk_khz * m1div);
3025 		}
3026 
3027 		if (m2div_int > 255)
3028 			return -EINVAL;
3029 	}
3030 	m2div_rem = dco_khz % (refclk_khz * m1div);
3031 
3032 	tmp = (u64)m2div_rem * (1 << 22);
3033 	do_div(tmp, refclk_khz * m1div);
3034 	m2div_frac = tmp;
3035 
3036 	switch (refclk_khz) {
3037 	case 19200:
3038 		iref_ndiv = 1;
3039 		iref_trim = 28;
3040 		iref_pulse_w = 1;
3041 		break;
3042 	case 24000:
3043 		iref_ndiv = 1;
3044 		iref_trim = 25;
3045 		iref_pulse_w = 2;
3046 		break;
3047 	case 38400:
3048 		iref_ndiv = 2;
3049 		iref_trim = 28;
3050 		iref_pulse_w = 1;
3051 		break;
3052 	default:
3053 		MISSING_CASE(refclk_khz);
3054 		return -EINVAL;
3055 	}
3056 
3057 	/*
3058 	 * tdc_res = 0.000003
3059 	 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
3060 	 *
3061 	 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
3062 	 * was supposed to be a division, but we rearranged the operations of
3063 	 * the formula to avoid early divisions so we don't multiply the
3064 	 * rounding errors.
3065 	 *
3066 	 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
3067 	 * we also rearrange to work with integers.
3068 	 *
3069 	 * The 0.5 transformed to 5 results in a multiplication by 10 and the
3070 	 * last division by 10.
3071 	 */
3072 	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
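	/*
	 * E.g. (illustrative) refclk_khz = 24000:
	 * (2,000,000,000 / 3,168,000 + 5) / 10 = (631 + 5) / 10 = 63,
	 * matching int(2 / 0.00132 / 24 + 0.5) from the formula above.
	 */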
3073 
3074 	/*
3075 	 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
3076 	 * 32 bits. That's not a problem since we round the division down
3077 	 * anyway.
3078 	 */
3079 	feedfwgain = (use_ssc || m2div_rem > 0) ?
3080 		m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
3081 
3082 	if (dco_khz >= 9000000) {
3083 		prop_coeff = 5;
3084 		int_coeff = 10;
3085 	} else {
3086 		prop_coeff = 4;
3087 		int_coeff = 8;
3088 	}
3089 
3090 	if (use_ssc) {
3091 		tmp = mul_u32_u32(dco_khz, 47 * 32);
3092 		do_div(tmp, refclk_khz * m1div * 10000);
3093 		ssc_stepsize = tmp;
3094 
3095 		tmp = mul_u32_u32(dco_khz, 1000);
3096 		ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
3097 	} else {
3098 		ssc_stepsize = 0;
3099 		ssc_steplen = 0;
3100 	}
3101 	ssc_steplog = 4;
3102 
3103 	/* write pll_state calculations */
3104 	if (is_dkl) {
3105 		hw_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
3106 					 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
3107 					 DKL_PLL_DIV0_FBPREDIV(m1div) |
3108 					 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
3109 		if (i915->display.vbt.override_afc_startup) {
3110 			u8 val = i915->display.vbt.override_afc_startup_val;
3111 
3112 			hw_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val);
3113 		}
3114 
3115 		hw_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
3116 					 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
3117 
3118 		hw_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
3119 					DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
3120 					DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
3121 					(use_ssc ? DKL_PLL_SSC_EN : 0);
3122 
3123 		hw_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
3124 					  DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
3125 
3126 		hw_state->mg_pll_tdc_coldst_bias =
3127 				DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
3128 				DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
3129 
3130 	} else {
3131 		hw_state->mg_pll_div0 =
3132 			(m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
3133 			MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
3134 			MG_PLL_DIV0_FBDIV_INT(m2div_int);
3135 
3136 		hw_state->mg_pll_div1 =
3137 			MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
3138 			MG_PLL_DIV1_DITHER_DIV_2 |
3139 			MG_PLL_DIV1_NDIVRATIO(1) |
3140 			MG_PLL_DIV1_FBPREDIV(m1div);
3141 
3142 		hw_state->mg_pll_lf =
3143 			MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
3144 			MG_PLL_LF_AFCCNTSEL_512 |
3145 			MG_PLL_LF_GAINCTRL(1) |
3146 			MG_PLL_LF_INT_COEFF(int_coeff) |
3147 			MG_PLL_LF_PROP_COEFF(prop_coeff);
3148 
3149 		hw_state->mg_pll_frac_lock =
3150 			MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
3151 			MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
3152 			MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
3153 			MG_PLL_FRAC_LOCK_DCODITHEREN |
3154 			MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
3155 		if (use_ssc || m2div_rem > 0)
3156 			hw_state->mg_pll_frac_lock |=
3157 				MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
3158 
3159 		hw_state->mg_pll_ssc =
3160 			(use_ssc ? MG_PLL_SSC_EN : 0) |
3161 			MG_PLL_SSC_TYPE(2) |
3162 			MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
3163 			MG_PLL_SSC_STEPNUM(ssc_steplog) |
3164 			MG_PLL_SSC_FLLEN |
3165 			MG_PLL_SSC_STEPSIZE(ssc_stepsize);
3166 
3167 		hw_state->mg_pll_tdc_coldst_bias =
3168 			MG_PLL_TDC_COLDST_COLDSTART |
3169 			MG_PLL_TDC_COLDST_IREFINT_EN |
3170 			MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
3171 			MG_PLL_TDC_TDCOVCCORR_EN |
3172 			MG_PLL_TDC_TDCSEL(3);
3173 
3174 		hw_state->mg_pll_bias =
3175 			MG_PLL_BIAS_BIAS_GB_SEL(3) |
3176 			MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
3177 			MG_PLL_BIAS_BIAS_BONUS(10) |
3178 			MG_PLL_BIAS_BIASCAL_EN |
3179 			MG_PLL_BIAS_CTRIM(12) |
3180 			MG_PLL_BIAS_VREF_RDAC(4) |
3181 			MG_PLL_BIAS_IREFTRIM(iref_trim);
3182 
3183 		if (refclk_khz == 38400) {
3184 			hw_state->mg_pll_tdc_coldst_bias_mask =
3185 				MG_PLL_TDC_COLDST_COLDSTART;
3186 			hw_state->mg_pll_bias_mask = 0;
3187 		} else {
3188 			hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
3189 			hw_state->mg_pll_bias_mask = -1U;
3190 		}
3191 
3192 		hw_state->mg_pll_tdc_coldst_bias &=
3193 			hw_state->mg_pll_tdc_coldst_bias_mask;
3194 		hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
3195 	}
3196 
3197 	return 0;
3198 }
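/*
 * Worked example (illustrative): continuing the 2.7 GHz DP case with a
 * 38.4 MHz reference, dco_khz = 8,100,000 and m1div = 2, so
 * m2div_int = 8,100,000 / 76,800 = 105, m2div_rem = 36,000 and
 * m2div_frac = 36,000 * 2^22 / 76,800 = 1,966,080.
 */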
3199 
3200 static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *i915,
3201 				   const struct intel_shared_dpll *pll,
3202 				   const struct intel_dpll_hw_state *dpll_hw_state)
3203 {
3204 	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3205 	u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
3206 	u64 tmp;
3207 
3208 	ref_clock = i915->display.dpll.ref_clks.nssc;
3209 
3210 	if (DISPLAY_VER(i915) >= 12) {
3211 		m1 = hw_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
3212 		m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
3213 		m2_int = hw_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
3214 
3215 		if (hw_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
3216 			m2_frac = hw_state->mg_pll_bias &
3217 				  DKL_PLL_BIAS_FBDIV_FRAC_MASK;
3218 			m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
3219 		} else {
3220 			m2_frac = 0;
3221 		}
3222 	} else {
3223 		m1 = hw_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
3224 		m2_int = hw_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
3225 
3226 		if (hw_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
3227 			m2_frac = hw_state->mg_pll_div0 &
3228 				  MG_PLL_DIV0_FBDIV_FRAC_MASK;
3229 			m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
3230 		} else {
3231 			m2_frac = 0;
3232 		}
3233 	}
3234 
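	/* Decode the hsdiv (div1) and dsdiv (div2) dividers from CLKTOP2_HSCLKCTL. */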
3235 	switch (hw_state->mg_clktop2_hsclkctl &
3236 		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
3237 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
3238 		div1 = 2;
3239 		break;
3240 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
3241 		div1 = 3;
3242 		break;
3243 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
3244 		div1 = 5;
3245 		break;
3246 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
3247 		div1 = 7;
3248 		break;
3249 	default:
3250 		MISSING_CASE(hw_state->mg_clktop2_hsclkctl);
3251 		return 0;
3252 	}
3253 
3254 	div2 = (hw_state->mg_clktop2_hsclkctl &
3255 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
3256 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;
3257 
3258 	/* A div2 value of 0 is the same as 1, i.e. no divider */
3259 	if (div2 == 0)
3260 		div2 = 1;
3261 
3262 	/*
3263 	 * Output = ref_clock * m1 * (m2_int + m2_frac / 2^22) / (5 * div1 * div2);
3264 	 * the division by 2^22 is delayed to minimize possible rounding errors.
3265 	 */
3266 	tmp = (u64)m1 * m2_int * ref_clock +
3267 	      (((u64)m1 * m2_frac * ref_clock) >> 22);
3268 	tmp = div_u64(tmp, 5 * div1 * div2);
3269 
3270 	return tmp;
3271 }
3272 
3273 /**
3274  * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
3275  * @crtc_state: state for the CRTC to select the DPLL for
3276  * @port_dpll_id: the active @port_dpll_id to select
3277  *
3278  * Select the given @port_dpll_id instance from the DPLLs reserved for the
3279  * CRTC.
3280  */
3281 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
3282 			      enum icl_port_dpll_id port_dpll_id)
3283 {
3284 	struct icl_port_dpll *port_dpll =
3285 		&crtc_state->icl_port_dplls[port_dpll_id];
3286 
3287 	crtc_state->shared_dpll = port_dpll->pll;
3288 	crtc_state->dpll_hw_state = port_dpll->hw_state;
3289 }
3290 
3291 static void icl_update_active_dpll(struct intel_atomic_state *state,
3292 				   struct intel_crtc *crtc,
3293 				   struct intel_encoder *encoder)
3294 {
3295 	struct intel_crtc_state *crtc_state =
3296 		intel_atomic_get_new_crtc_state(state, crtc);
3297 	struct intel_digital_port *primary_port;
3298 	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3299 
3300 	primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3301 		enc_to_mst(encoder)->primary :
3302 		enc_to_dig_port(encoder);
3303 
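	/*
	 * TC ports in DP-alt or legacy mode drive the MG/DKL PHY PLL;
	 * otherwise the default port DPLL (the TBT PLL for TC ports) is kept.
	 */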
3304 	if (primary_port &&
3305 	    (intel_tc_port_in_dp_alt_mode(primary_port) ||
3306 	     intel_tc_port_in_legacy_mode(primary_port)))
3307 		port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3308 
3309 	icl_set_active_port_dpll(crtc_state, port_dpll_id);
3310 }
3311 
3312 static int icl_compute_combo_phy_dpll(struct intel_atomic_state *state,
3313 				      struct intel_crtc *crtc)
3314 {
3315 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
3316 	struct intel_crtc_state *crtc_state =
3317 		intel_atomic_get_new_crtc_state(state, crtc);
3318 	struct icl_port_dpll *port_dpll =
3319 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3320 	struct skl_wrpll_params pll_params = {};
3321 	int ret;
3322 
3323 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
3324 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
3325 		ret = icl_calc_wrpll(crtc_state, &pll_params);
3326 	else
3327 		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
3328 
3329 	if (ret)
3330 		return ret;
3331 
3332 	icl_calc_dpll_state(i915, &pll_params, &port_dpll->hw_state);
3333 
3334 	/* this is mainly for the fastset check */
3335 	icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
3336 
3337 	crtc_state->port_clock = icl_ddi_combo_pll_get_freq(i915, NULL,
3338 							    &port_dpll->hw_state);
3339 
3340 	return 0;
3341 }
3342 
3343 static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
3344 				  struct intel_crtc *crtc,
3345 				  struct intel_encoder *encoder)
3346 {
3347 	struct intel_display *display = to_intel_display(crtc);
3348 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
3349 	struct intel_crtc_state *crtc_state =
3350 		intel_atomic_get_new_crtc_state(state, crtc);
3351 	struct icl_port_dpll *port_dpll =
3352 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3353 	enum port port = encoder->port;
3354 	unsigned long dpll_mask;
3355 
3356 	if (IS_ALDERLAKE_S(i915)) {
3357 		dpll_mask =
3358 			BIT(DPLL_ID_DG1_DPLL3) |
3359 			BIT(DPLL_ID_DG1_DPLL2) |
3360 			BIT(DPLL_ID_ICL_DPLL1) |
3361 			BIT(DPLL_ID_ICL_DPLL0);
3362 	} else if (IS_DG1(i915)) {
3363 		if (port == PORT_D || port == PORT_E) {
3364 			dpll_mask =
3365 				BIT(DPLL_ID_DG1_DPLL2) |
3366 				BIT(DPLL_ID_DG1_DPLL3);
3367 		} else {
3368 			dpll_mask =
3369 				BIT(DPLL_ID_DG1_DPLL0) |
3370 				BIT(DPLL_ID_DG1_DPLL1);
3371 		}
3372 	} else if (IS_ROCKETLAKE(i915)) {
3373 		dpll_mask =
3374 			BIT(DPLL_ID_EHL_DPLL4) |
3375 			BIT(DPLL_ID_ICL_DPLL1) |
3376 			BIT(DPLL_ID_ICL_DPLL0);
3377 	} else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
3378 		   port != PORT_A) {
3379 		dpll_mask =
3380 			BIT(DPLL_ID_EHL_DPLL4) |
3381 			BIT(DPLL_ID_ICL_DPLL1) |
3382 			BIT(DPLL_ID_ICL_DPLL0);
3383 	} else {
3384 		dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
3385 	}
3386 
3387 	/* Eliminate DPLLs from consideration if reserved by HTI */
3388 	dpll_mask &= ~intel_hti_dpll_mask(display);
3389 
3390 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3391 						&port_dpll->hw_state,
3392 						dpll_mask);
3393 	if (!port_dpll->pll)
3394 		return -EINVAL;
3395 
3396 	intel_reference_shared_dpll(state, crtc,
3397 				    port_dpll->pll, &port_dpll->hw_state);
3398 
3399 	icl_update_active_dpll(state, crtc, encoder);
3400 
3401 	return 0;
3402 }
3403 
3404 static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
3405 				    struct intel_crtc *crtc)
3406 {
3407 	struct drm_i915_private *i915 = to_i915(state->base.dev);
3408 	struct intel_crtc_state *crtc_state =
3409 		intel_atomic_get_new_crtc_state(state, crtc);
3410 	const struct intel_crtc_state *old_crtc_state =
3411 		intel_atomic_get_old_crtc_state(state, crtc);
3412 	struct icl_port_dpll *port_dpll =
3413 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3414 	struct skl_wrpll_params pll_params = {};
3415 	int ret;
3416 
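	/* Compute the state for both the TBT (default) and MG/DKL PHY port PLLs. */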
3417 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3418 	ret = icl_calc_tbt_pll(crtc_state, &pll_params);
3419 	if (ret)
3420 		return ret;
3421 
3422 	icl_calc_dpll_state(i915, &pll_params, &port_dpll->hw_state);
3423 
3424 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3425 	ret = icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state);
3426 	if (ret)
3427 		return ret;
3428 
3429 	/* this is mainly for the fastset check */
3430 	if (old_crtc_state->shared_dpll &&
3431 	    old_crtc_state->shared_dpll->info->id == DPLL_ID_ICL_TBTPLL)
3432 		icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
3433 	else
3434 		icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);
3435 
3436 	crtc_state->port_clock = icl_ddi_mg_pll_get_freq(i915, NULL,
3437 							 &port_dpll->hw_state);
3438 
3439 	return 0;
3440 }
3441 
3442 static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
3443 				struct intel_crtc *crtc,
3444 				struct intel_encoder *encoder)
3445 {
3446 	struct intel_crtc_state *crtc_state =
3447 		intel_atomic_get_new_crtc_state(state, crtc);
3448 	struct icl_port_dpll *port_dpll =
3449 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3450 	enum intel_dpll_id dpll_id;
3451 	int ret;
3452 
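	/* Reserve both the TBT PLL and the port specific MG/DKL PHY PLL. */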
3453 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3454 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3455 						&port_dpll->hw_state,
3456 						BIT(DPLL_ID_ICL_TBTPLL));
3457 	if (!port_dpll->pll)
3458 		return -EINVAL;
3459 	intel_reference_shared_dpll(state, crtc,
3460 				    port_dpll->pll, &port_dpll->hw_state);
3461 
3462 
3463 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3464 	dpll_id = icl_tc_port_to_pll_id(intel_encoder_to_tc(encoder));
3465 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3466 						&port_dpll->hw_state,
3467 						BIT(dpll_id));
3468 	if (!port_dpll->pll) {
3469 		ret = -EINVAL;
3470 		goto err_unreference_tbt_pll;
3471 	}
3472 	intel_reference_shared_dpll(state, crtc,
3473 				    port_dpll->pll, &port_dpll->hw_state);
3474 
3475 	icl_update_active_dpll(state, crtc, encoder);
3476 
3477 	return 0;
3478 
3479 err_unreference_tbt_pll:
3480 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3481 	intel_unreference_shared_dpll(state, crtc, port_dpll->pll);
3482 
3483 	return ret;
3484 }
3485 
3486 static int icl_compute_dplls(struct intel_atomic_state *state,
3487 			     struct intel_crtc *crtc,
3488 			     struct intel_encoder *encoder)
3489 {
3490 	if (intel_encoder_is_combo(encoder))
3491 		return icl_compute_combo_phy_dpll(state, crtc);
3492 	else if (intel_encoder_is_tc(encoder))
3493 		return icl_compute_tc_phy_dplls(state, crtc);
3494 
3495 	MISSING_CASE(encoder->port);
3496 
3497 	return 0;
3498 }
3499 
3500 static int icl_get_dplls(struct intel_atomic_state *state,
3501 			 struct intel_crtc *crtc,
3502 			 struct intel_encoder *encoder)
3503 {
3504 	if (intel_encoder_is_combo(encoder))
3505 		return icl_get_combo_phy_dpll(state, crtc, encoder);
3506 	else if (intel_encoder_is_tc(encoder))
3507 		return icl_get_tc_phy_dplls(state, crtc, encoder);
3508 
3509 	MISSING_CASE(encoder->port);
3510 
3511 	return -EINVAL;
3512 }
3513 
3514 static void icl_put_dplls(struct intel_atomic_state *state,
3515 			  struct intel_crtc *crtc)
3516 {
3517 	const struct intel_crtc_state *old_crtc_state =
3518 		intel_atomic_get_old_crtc_state(state, crtc);
3519 	struct intel_crtc_state *new_crtc_state =
3520 		intel_atomic_get_new_crtc_state(state, crtc);
3521 	enum icl_port_dpll_id id;
3522 
3523 	new_crtc_state->shared_dpll = NULL;
3524 
3525 	for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3526 		const struct icl_port_dpll *old_port_dpll =
3527 			&old_crtc_state->icl_port_dplls[id];
3528 		struct icl_port_dpll *new_port_dpll =
3529 			&new_crtc_state->icl_port_dplls[id];
3530 
3531 		new_port_dpll->pll = NULL;
3532 
3533 		if (!old_port_dpll->pll)
3534 			continue;
3535 
3536 		intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3537 	}
3538 }
3539 
3540 static bool mg_pll_get_hw_state(struct drm_i915_private *i915,
3541 				struct intel_shared_dpll *pll,
3542 				struct intel_dpll_hw_state *dpll_hw_state)
3543 {
3544 	struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3545 	const enum intel_dpll_id id = pll->info->id;
3546 	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3547 	intel_wakeref_t wakeref;
3548 	bool ret = false;
3549 	u32 val;
3550 
3551 	i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll);
3552 
3553 	wakeref = intel_display_power_get_if_enabled(i915,
3554 						     POWER_DOMAIN_DISPLAY_CORE);
3555 	if (!wakeref)
3556 		return false;
3557 
3558 	val = intel_de_read(i915, enable_reg);
3559 	if (!(val & PLL_ENABLE))
3560 		goto out;
3561 
3562 	hw_state->mg_refclkin_ctl = intel_de_read(i915,
3563 						  MG_REFCLKIN_CTL(tc_port));
3564 	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3565 
3566 	hw_state->mg_clktop2_coreclkctl1 =
3567 		intel_de_read(i915, MG_CLKTOP2_CORECLKCTL1(tc_port));
3568 	hw_state->mg_clktop2_coreclkctl1 &=
3569 		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3570 
3571 	hw_state->mg_clktop2_hsclkctl =
3572 		intel_de_read(i915, MG_CLKTOP2_HSCLKCTL(tc_port));
3573 	hw_state->mg_clktop2_hsclkctl &=
3574 		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3575 		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3576 		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3577 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3578 
3579 	hw_state->mg_pll_div0 = intel_de_read(i915, MG_PLL_DIV0(tc_port));
3580 	hw_state->mg_pll_div1 = intel_de_read(i915, MG_PLL_DIV1(tc_port));
3581 	hw_state->mg_pll_lf = intel_de_read(i915, MG_PLL_LF(tc_port));
3582 	hw_state->mg_pll_frac_lock = intel_de_read(i915,
3583 						   MG_PLL_FRAC_LOCK(tc_port));
3584 	hw_state->mg_pll_ssc = intel_de_read(i915, MG_PLL_SSC(tc_port));
3585 
3586 	hw_state->mg_pll_bias = intel_de_read(i915, MG_PLL_BIAS(tc_port));
3587 	hw_state->mg_pll_tdc_coldst_bias =
3588 		intel_de_read(i915, MG_PLL_TDC_COLDST_BIAS(tc_port));
3589 
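	/*
	 * Apply the same refclk dependent masks as icl_calc_mg_pll_state(),
	 * so the readout only contains the fields that are actually
	 * programmed/verified.
	 */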
3590 	if (i915->display.dpll.ref_clks.nssc == 38400) {
3591 		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
3592 		hw_state->mg_pll_bias_mask = 0;
3593 	} else {
3594 		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
3595 		hw_state->mg_pll_bias_mask = -1U;
3596 	}
3597 
3598 	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
3599 	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
3600 
3601 	ret = true;
3602 out:
3603 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3604 	return ret;
3605 }
3606 
3607 static bool dkl_pll_get_hw_state(struct drm_i915_private *i915,
3608 				 struct intel_shared_dpll *pll,
3609 				 struct intel_dpll_hw_state *dpll_hw_state)
3610 {
3611 	struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3612 	const enum intel_dpll_id id = pll->info->id;
3613 	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3614 	intel_wakeref_t wakeref;
3615 	bool ret = false;
3616 	u32 val;
3617 
3618 	wakeref = intel_display_power_get_if_enabled(i915,
3619 						     POWER_DOMAIN_DISPLAY_CORE);
3620 	if (!wakeref)
3621 		return false;
3622 
3623 	val = intel_de_read(i915, intel_tc_pll_enable_reg(i915, pll));
3624 	if (!(val & PLL_ENABLE))
3625 		goto out;
3626 
3627 	/*
3628 	 * All registers read here have the same HIP_INDEX_REG even though
3629 	 * they are on different building blocks
3630 	 */
3631 	hw_state->mg_refclkin_ctl = intel_dkl_phy_read(i915,
3632 						       DKL_REFCLKIN_CTL(tc_port));
3633 	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3634 
3635 	hw_state->mg_clktop2_hsclkctl =
3636 		intel_dkl_phy_read(i915, DKL_CLKTOP2_HSCLKCTL(tc_port));
3637 	hw_state->mg_clktop2_hsclkctl &=
3638 		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3639 		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3640 		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3641 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3642 
3643 	hw_state->mg_clktop2_coreclkctl1 =
3644 		intel_dkl_phy_read(i915, DKL_CLKTOP2_CORECLKCTL1(tc_port));
3645 	hw_state->mg_clktop2_coreclkctl1 &=
3646 		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3647 
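	/*
	 * Only keep the AFC startup field if the VBT overrides it, matching
	 * what dkl_pll_write() programs.
	 */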
3648 	hw_state->mg_pll_div0 = intel_dkl_phy_read(i915, DKL_PLL_DIV0(tc_port));
3649 	val = DKL_PLL_DIV0_MASK;
3650 	if (i915->display.vbt.override_afc_startup)
3651 		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
3652 	hw_state->mg_pll_div0 &= val;
3653 
3654 	hw_state->mg_pll_div1 = intel_dkl_phy_read(i915, DKL_PLL_DIV1(tc_port));
3655 	hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
3656 				  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
3657 
3658 	hw_state->mg_pll_ssc = intel_dkl_phy_read(i915, DKL_PLL_SSC(tc_port));
3659 	hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
3660 				 DKL_PLL_SSC_STEP_LEN_MASK |
3661 				 DKL_PLL_SSC_STEP_NUM_MASK |
3662 				 DKL_PLL_SSC_EN);
3663 
3664 	hw_state->mg_pll_bias = intel_dkl_phy_read(i915, DKL_PLL_BIAS(tc_port));
3665 	hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
3666 				  DKL_PLL_BIAS_FBDIV_FRAC_MASK);
3667 
3668 	hw_state->mg_pll_tdc_coldst_bias =
3669 		intel_dkl_phy_read(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3670 	hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
3671 					     DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
3672 
3673 	ret = true;
3674 out:
3675 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3676 	return ret;
3677 }
3678 
3679 static bool icl_pll_get_hw_state(struct drm_i915_private *i915,
3680 				 struct intel_shared_dpll *pll,
3681 				 struct intel_dpll_hw_state *dpll_hw_state,
3682 				 i915_reg_t enable_reg)
3683 {
3684 	struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3685 	const enum intel_dpll_id id = pll->info->id;
3686 	intel_wakeref_t wakeref;
3687 	bool ret = false;
3688 	u32 val;
3689 
3690 	wakeref = intel_display_power_get_if_enabled(i915,
3691 						     POWER_DOMAIN_DISPLAY_CORE);
3692 	if (!wakeref)
3693 		return false;
3694 
3695 	val = intel_de_read(i915, enable_reg);
3696 	if (!(val & PLL_ENABLE))
3697 		goto out;
3698 
3699 	if (IS_ALDERLAKE_S(i915)) {
3700 		hw_state->cfgcr0 = intel_de_read(i915, ADLS_DPLL_CFGCR0(id));
3701 		hw_state->cfgcr1 = intel_de_read(i915, ADLS_DPLL_CFGCR1(id));
3702 	} else if (IS_DG1(i915)) {
3703 		hw_state->cfgcr0 = intel_de_read(i915, DG1_DPLL_CFGCR0(id));
3704 		hw_state->cfgcr1 = intel_de_read(i915, DG1_DPLL_CFGCR1(id));
3705 	} else if (IS_ROCKETLAKE(i915)) {
3706 		hw_state->cfgcr0 = intel_de_read(i915,
3707 						 RKL_DPLL_CFGCR0(id));
3708 		hw_state->cfgcr1 = intel_de_read(i915,
3709 						 RKL_DPLL_CFGCR1(id));
3710 	} else if (DISPLAY_VER(i915) >= 12) {
3711 		hw_state->cfgcr0 = intel_de_read(i915,
3712 						 TGL_DPLL_CFGCR0(id));
3713 		hw_state->cfgcr1 = intel_de_read(i915,
3714 						 TGL_DPLL_CFGCR1(id));
3715 		if (i915->display.vbt.override_afc_startup) {
3716 			hw_state->div0 = intel_de_read(i915, TGL_DPLL0_DIV0(id));
3717 			hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK;
3718 		}
3719 	} else {
3720 		if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
3721 		    id == DPLL_ID_EHL_DPLL4) {
3722 			hw_state->cfgcr0 = intel_de_read(i915,
3723 							 ICL_DPLL_CFGCR0(4));
3724 			hw_state->cfgcr1 = intel_de_read(i915,
3725 							 ICL_DPLL_CFGCR1(4));
3726 		} else {
3727 			hw_state->cfgcr0 = intel_de_read(i915,
3728 							 ICL_DPLL_CFGCR0(id));
3729 			hw_state->cfgcr1 = intel_de_read(i915,
3730 							 ICL_DPLL_CFGCR1(id));
3731 		}
3732 	}
3733 
3734 	ret = true;
3735 out:
3736 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3737 	return ret;
3738 }
3739 
3740 static bool combo_pll_get_hw_state(struct drm_i915_private *i915,
3741 				   struct intel_shared_dpll *pll,
3742 				   struct intel_dpll_hw_state *dpll_hw_state)
3743 {
3744 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
3745 
3746 	return icl_pll_get_hw_state(i915, pll, dpll_hw_state, enable_reg);
3747 }
3748 
3749 static bool tbt_pll_get_hw_state(struct drm_i915_private *i915,
3750 				 struct intel_shared_dpll *pll,
3751 				 struct intel_dpll_hw_state *dpll_hw_state)
3752 {
3753 	return icl_pll_get_hw_state(i915, pll, dpll_hw_state, TBT_PLL_ENABLE);
3754 }
3755 
3756 static void icl_dpll_write(struct drm_i915_private *i915,
3757 			   struct intel_shared_dpll *pll,
3758 			   const struct icl_dpll_hw_state *hw_state)
3759 {
3760 	const enum intel_dpll_id id = pll->info->id;
3761 	i915_reg_t cfgcr0_reg, cfgcr1_reg, div0_reg = INVALID_MMIO_REG;
3762 
3763 	if (IS_ALDERLAKE_S(i915)) {
3764 		cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
3765 		cfgcr1_reg = ADLS_DPLL_CFGCR1(id);
3766 	} else if (IS_DG1(i915)) {
3767 		cfgcr0_reg = DG1_DPLL_CFGCR0(id);
3768 		cfgcr1_reg = DG1_DPLL_CFGCR1(id);
3769 	} else if (IS_ROCKETLAKE(i915)) {
3770 		cfgcr0_reg = RKL_DPLL_CFGCR0(id);
3771 		cfgcr1_reg = RKL_DPLL_CFGCR1(id);
3772 	} else if (DISPLAY_VER(i915) >= 12) {
3773 		cfgcr0_reg = TGL_DPLL_CFGCR0(id);
3774 		cfgcr1_reg = TGL_DPLL_CFGCR1(id);
3775 		div0_reg = TGL_DPLL0_DIV0(id);
3776 	} else {
3777 		if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
3778 		    id == DPLL_ID_EHL_DPLL4) {
3779 			cfgcr0_reg = ICL_DPLL_CFGCR0(4);
3780 			cfgcr1_reg = ICL_DPLL_CFGCR1(4);
3781 		} else {
3782 			cfgcr0_reg = ICL_DPLL_CFGCR0(id);
3783 			cfgcr1_reg = ICL_DPLL_CFGCR1(id);
3784 		}
3785 	}
3786 
3787 	intel_de_write(i915, cfgcr0_reg, hw_state->cfgcr0);
3788 	intel_de_write(i915, cfgcr1_reg, hw_state->cfgcr1);
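	/*
	 * The AFC startup override lives in a separate DIV0 register, which is
	 * only programmed here on the platforms that set div0_reg above.
	 */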
3789 	drm_WARN_ON_ONCE(&i915->drm, i915->display.vbt.override_afc_startup &&
3790 			 !i915_mmio_reg_valid(div0_reg));
3791 	if (i915->display.vbt.override_afc_startup &&
3792 	    i915_mmio_reg_valid(div0_reg))
3793 		intel_de_rmw(i915, div0_reg,
3794 			     TGL_DPLL0_DIV0_AFC_STARTUP_MASK, hw_state->div0);
3795 	intel_de_posting_read(i915, cfgcr1_reg);
3796 }
3797 
3798 static void icl_mg_pll_write(struct drm_i915_private *i915,
3799 			     struct intel_shared_dpll *pll,
3800 			     const struct icl_dpll_hw_state *hw_state)
3801 {
3802 	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3803 
3804 	/*
3805 	 * Some of the following registers have reserved fields, so program
3806 	 * these with RMW based on a mask. The mask can be fixed or generated
3807 	 * during the calc/readout phase if the mask depends on some other HW
3808 	 * state like refclk, see icl_calc_mg_pll_state().
3809 	 */
3810 	intel_de_rmw(i915, MG_REFCLKIN_CTL(tc_port),
3811 		     MG_REFCLKIN_CTL_OD_2_MUX_MASK, hw_state->mg_refclkin_ctl);
3812 
3813 	intel_de_rmw(i915, MG_CLKTOP2_CORECLKCTL1(tc_port),
3814 		     MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK,
3815 		     hw_state->mg_clktop2_coreclkctl1);
3816 
3817 	intel_de_rmw(i915, MG_CLKTOP2_HSCLKCTL(tc_port),
3818 		     MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3819 		     MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3820 		     MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3821 		     MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK,
3822 		     hw_state->mg_clktop2_hsclkctl);
3823 
3824 	intel_de_write(i915, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
3825 	intel_de_write(i915, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
3826 	intel_de_write(i915, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
3827 	intel_de_write(i915, MG_PLL_FRAC_LOCK(tc_port),
3828 		       hw_state->mg_pll_frac_lock);
3829 	intel_de_write(i915, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
3830 
3831 	intel_de_rmw(i915, MG_PLL_BIAS(tc_port),
3832 		     hw_state->mg_pll_bias_mask, hw_state->mg_pll_bias);
3833 
3834 	intel_de_rmw(i915, MG_PLL_TDC_COLDST_BIAS(tc_port),
3835 		     hw_state->mg_pll_tdc_coldst_bias_mask,
3836 		     hw_state->mg_pll_tdc_coldst_bias);
3837 
3838 	intel_de_posting_read(i915, MG_PLL_TDC_COLDST_BIAS(tc_port));
3839 }
3840 
3841 static void dkl_pll_write(struct drm_i915_private *i915,
3842 			  struct intel_shared_dpll *pll,
3843 			  const struct icl_dpll_hw_state *hw_state)
3844 {
3845 	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3846 	u32 val;
3847 
3848 	/*
3849 	 * All registers programmed here have the same HIP_INDEX_REG even
3850 	 * though they are on different building blocks
3851 	 */
3852 	/* All the registers are RMW */
3853 	val = intel_dkl_phy_read(i915, DKL_REFCLKIN_CTL(tc_port));
3854 	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3855 	val |= hw_state->mg_refclkin_ctl;
3856 	intel_dkl_phy_write(i915, DKL_REFCLKIN_CTL(tc_port), val);
3857 
3858 	val = intel_dkl_phy_read(i915, DKL_CLKTOP2_CORECLKCTL1(tc_port));
3859 	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3860 	val |= hw_state->mg_clktop2_coreclkctl1;
3861 	intel_dkl_phy_write(i915, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);
3862 
3863 	val = intel_dkl_phy_read(i915, DKL_CLKTOP2_HSCLKCTL(tc_port));
3864 	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3865 		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3866 		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3867 		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
3868 	val |= hw_state->mg_clktop2_hsclkctl;
3869 	intel_dkl_phy_write(i915, DKL_CLKTOP2_HSCLKCTL(tc_port), val);
3870 
3871 	val = DKL_PLL_DIV0_MASK;
3872 	if (i915->display.vbt.override_afc_startup)
3873 		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
3874 	intel_dkl_phy_rmw(i915, DKL_PLL_DIV0(tc_port), val,
3875 			  hw_state->mg_pll_div0);
3876 
3877 	val = intel_dkl_phy_read(i915, DKL_PLL_DIV1(tc_port));
3878 	val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
3879 		 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
3880 	val |= hw_state->mg_pll_div1;
3881 	intel_dkl_phy_write(i915, DKL_PLL_DIV1(tc_port), val);
3882 
3883 	val = intel_dkl_phy_read(i915, DKL_PLL_SSC(tc_port));
3884 	val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
3885 		 DKL_PLL_SSC_STEP_LEN_MASK |
3886 		 DKL_PLL_SSC_STEP_NUM_MASK |
3887 		 DKL_PLL_SSC_EN);
3888 	val |= hw_state->mg_pll_ssc;
3889 	intel_dkl_phy_write(i915, DKL_PLL_SSC(tc_port), val);
3890 
3891 	val = intel_dkl_phy_read(i915, DKL_PLL_BIAS(tc_port));
3892 	val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
3893 		 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
3894 	val |= hw_state->mg_pll_bias;
3895 	intel_dkl_phy_write(i915, DKL_PLL_BIAS(tc_port), val);
3896 
3897 	val = intel_dkl_phy_read(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3898 	val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
3899 		 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
3900 	val |= hw_state->mg_pll_tdc_coldst_bias;
3901 	intel_dkl_phy_write(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);
3902 
3903 	intel_dkl_phy_posting_read(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3904 }
3905 
3906 static void icl_pll_power_enable(struct drm_i915_private *i915,
3907 				 struct intel_shared_dpll *pll,
3908 				 i915_reg_t enable_reg)
3909 {
3910 	intel_de_rmw(i915, enable_reg, 0, PLL_POWER_ENABLE);
3911 
3912 	/*
3913 	 * The spec says we need to "wait" but it also says it should be
3914 	 * immediate.
3915 	 */
3916 	if (intel_de_wait_for_set(i915, enable_reg, PLL_POWER_STATE, 1))
3917 		drm_err(&i915->drm, "PLL %d Power not enabled\n",
3918 			pll->info->id);
3919 }
3920 
3921 static void icl_pll_enable(struct drm_i915_private *i915,
3922 			   struct intel_shared_dpll *pll,
3923 			   i915_reg_t enable_reg)
3924 {
3925 	intel_de_rmw(i915, enable_reg, 0, PLL_ENABLE);
3926 
3927 	/* Timeout is actually 600us. */
3928 	if (intel_de_wait_for_set(i915, enable_reg, PLL_LOCK, 1))
3929 		drm_err(&i915->drm, "PLL %d not locked\n", pll->info->id);
3930 }
3931 
3932 static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct intel_shared_dpll *pll)
3933 {
3934 	u32 val;
3935 
3936 	if (!(IS_ALDERLAKE_P(i915) && IS_DISPLAY_STEP(i915, STEP_A0, STEP_B0)) ||
3937 	    pll->info->id != DPLL_ID_ICL_DPLL0)
3938 		return;
3939 	/*
3940 	 * Wa_16011069516:adl-p[a0]
3941 	 *
3942 	 * All CMTG regs are unreliable until CMTG clock gating is disabled,
3943 	 * so we can only assume the default TRANS_CMTG_CHICKEN reg value and
3944 	 * sanity check this assumption with a double read, which presumably
3945 	 * returns the correct value even with clock gating on.
3946 	 *
3947 	 * Instead of the usual place for workarounds we apply this one here,
3948 	 * since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled.
3949 	 */
3950 	val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
3951 	val = intel_de_rmw(i915, TRANS_CMTG_CHICKEN, ~0, DISABLE_DPT_CLK_GATING);
3952 	if (drm_WARN_ON(&i915->drm, val & ~DISABLE_DPT_CLK_GATING))
3953 		drm_dbg_kms(&i915->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val);
3954 }
3955 
3956 static void combo_pll_enable(struct drm_i915_private *i915,
3957 			     struct intel_shared_dpll *pll,
3958 			     const struct intel_dpll_hw_state *dpll_hw_state)
3959 {
3960 	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3961 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
3962 
3963 	icl_pll_power_enable(i915, pll, enable_reg);
3964 
3965 	icl_dpll_write(i915, pll, hw_state);
3966 
3967 	/*
3968 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3969 	 * paths should already be setting the appropriate voltage, hence we do
3970 	 * nothing here.
3971 	 */
3972 
3973 	icl_pll_enable(i915, pll, enable_reg);
3974 
3975 	adlp_cmtg_clock_gating_wa(i915, pll);
3976 
3977 	/* DVFS post sequence would be here. See the comment above. */
3978 }
3979 
3980 static void tbt_pll_enable(struct drm_i915_private *i915,
3981 			   struct intel_shared_dpll *pll,
3982 			   const struct intel_dpll_hw_state *dpll_hw_state)
3983 {
3984 	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3985 
3986 	icl_pll_power_enable(i915, pll, TBT_PLL_ENABLE);
3987 
3988 	icl_dpll_write(i915, pll, hw_state);
3989 
3990 	/*
3991 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3992 	 * paths should already be setting the appropriate voltage, hence we do
3993 	 * nothing here.
3994 	 */
3995 
3996 	icl_pll_enable(i915, pll, TBT_PLL_ENABLE);
3997 
3998 	/* DVFS post sequence would be here. See the comment above. */
3999 }
4000 
4001 static void mg_pll_enable(struct drm_i915_private *i915,
4002 			  struct intel_shared_dpll *pll,
4003 			  const struct intel_dpll_hw_state *dpll_hw_state)
4004 {
4005 	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
4006 	i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll);
4007 
4008 	icl_pll_power_enable(i915, pll, enable_reg);
4009 
4010 	if (DISPLAY_VER(i915) >= 12)
4011 		dkl_pll_write(i915, pll, hw_state);
4012 	else
4013 		icl_mg_pll_write(i915, pll, hw_state);
4014 
4015 	/*
4016 	 * DVFS pre sequence would be here, but in our driver the cdclk code
4017 	 * paths should already be setting the appropriate voltage, hence we do
4018 	 * nothing here.
4019 	 */
4020 
4021 	icl_pll_enable(i915, pll, enable_reg);
4022 
4023 	/* DVFS post sequence would be here. See the comment above. */
4024 }
4025 
4026 static void icl_pll_disable(struct drm_i915_private *i915,
4027 			    struct intel_shared_dpll *pll,
4028 			    i915_reg_t enable_reg)
4029 {
4030 	/* The first steps are done by intel_ddi_post_disable(). */
4031 
4032 	/*
4033 	 * DVFS pre sequence would be here, but in our driver the cdclk code
4034 	 * paths should already be setting the appropriate voltage, hence we do
4035 	 * nothing here.
4036 	 */
4037 
4038 	intel_de_rmw(i915, enable_reg, PLL_ENABLE, 0);
4039 
4040 	/* Timeout is actually 1us. */
4041 	if (intel_de_wait_for_clear(i915, enable_reg, PLL_LOCK, 1))
4042 		drm_err(&i915->drm, "PLL %d locked\n", pll->info->id);
4043 
4044 	/* DVFS post sequence would be here. See the comment above. */
4045 
4046 	intel_de_rmw(i915, enable_reg, PLL_POWER_ENABLE, 0);
4047 
4048 	/*
4049 	 * The spec says we need to "wait" but it also says it should be
4050 	 * immediate.
4051 	 */
4052 	if (intel_de_wait_for_clear(i915, enable_reg, PLL_POWER_STATE, 1))
4053 		drm_err(&i915->drm, "PLL %d Power not disabled\n",
4054 			pll->info->id);
4055 }
4056 
4057 static void combo_pll_disable(struct drm_i915_private *i915,
4058 			      struct intel_shared_dpll *pll)
4059 {
4060 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
4061 
4062 	icl_pll_disable(i915, pll, enable_reg);
4063 }
4064 
4065 static void tbt_pll_disable(struct drm_i915_private *i915,
4066 			    struct intel_shared_dpll *pll)
4067 {
4068 	icl_pll_disable(i915, pll, TBT_PLL_ENABLE);
4069 }
4070 
4071 static void mg_pll_disable(struct drm_i915_private *i915,
4072 			   struct intel_shared_dpll *pll)
4073 {
4074 	i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll);
4075 
4076 	icl_pll_disable(i915, pll, enable_reg);
4077 }
4078 
4079 static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
4080 {
4081 	/* No SSC ref */
4082 	i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
4083 }
4084 
4085 static void icl_dump_hw_state(struct drm_printer *p,
4086 			      const struct intel_dpll_hw_state *dpll_hw_state)
4087 {
4088 	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
4089 
4090 	drm_printf(p, "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, "
4091 		   "mg_refclkin_ctl: 0x%x, mg_clktop2_coreclkctl1: 0x%x, "
4092 		   "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
4093 		   "mg_pll_div1: 0x%x, mg_pll_lf: 0x%x, "
4094 		   "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
4095 		   "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
4096 		   hw_state->cfgcr0, hw_state->cfgcr1, hw_state->div0,
4097 		   hw_state->mg_refclkin_ctl,
4098 		   hw_state->mg_clktop2_coreclkctl1,
4099 		   hw_state->mg_clktop2_hsclkctl,
4100 		   hw_state->mg_pll_div0,
4101 		   hw_state->mg_pll_div1,
4102 		   hw_state->mg_pll_lf,
4103 		   hw_state->mg_pll_frac_lock,
4104 		   hw_state->mg_pll_ssc,
4105 		   hw_state->mg_pll_bias,
4106 		   hw_state->mg_pll_tdc_coldst_bias);
4107 }
4108 
4109 static bool icl_compare_hw_state(const struct intel_dpll_hw_state *_a,
4110 				 const struct intel_dpll_hw_state *_b)
4111 {
4112 	const struct icl_dpll_hw_state *a = &_a->icl;
4113 	const struct icl_dpll_hw_state *b = &_b->icl;
4114 
4115 	/* FIXME split combo vs. mg more thoroughly */
4116 	return a->cfgcr0 == b->cfgcr0 &&
4117 		a->cfgcr1 == b->cfgcr1 &&
4118 		a->div0 == b->div0 &&
4119 		a->mg_refclkin_ctl == b->mg_refclkin_ctl &&
4120 		a->mg_clktop2_coreclkctl1 == b->mg_clktop2_coreclkctl1 &&
4121 		a->mg_clktop2_hsclkctl == b->mg_clktop2_hsclkctl &&
4122 		a->mg_pll_div0 == b->mg_pll_div0 &&
4123 		a->mg_pll_div1 == b->mg_pll_div1 &&
4124 		a->mg_pll_lf == b->mg_pll_lf &&
4125 		a->mg_pll_frac_lock == b->mg_pll_frac_lock &&
4126 		a->mg_pll_ssc == b->mg_pll_ssc &&
4127 		a->mg_pll_bias == b->mg_pll_bias &&
4128 		a->mg_pll_tdc_coldst_bias == b->mg_pll_tdc_coldst_bias;
4129 }
4130 
4131 static const struct intel_shared_dpll_funcs combo_pll_funcs = {
4132 	.enable = combo_pll_enable,
4133 	.disable = combo_pll_disable,
4134 	.get_hw_state = combo_pll_get_hw_state,
4135 	.get_freq = icl_ddi_combo_pll_get_freq,
4136 };
4137 
4138 static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
4139 	.enable = tbt_pll_enable,
4140 	.disable = tbt_pll_disable,
4141 	.get_hw_state = tbt_pll_get_hw_state,
4142 	.get_freq = icl_ddi_tbt_pll_get_freq,
4143 };
4144 
4145 static const struct intel_shared_dpll_funcs mg_pll_funcs = {
4146 	.enable = mg_pll_enable,
4147 	.disable = mg_pll_disable,
4148 	.get_hw_state = mg_pll_get_hw_state,
4149 	.get_freq = icl_ddi_mg_pll_get_freq,
4150 };
4151 
4152 static const struct dpll_info icl_plls[] = {
4153 	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4154 	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4155 	{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
4156 	  .is_alt_port_dpll = true, },
4157 	{ .name = "MG PLL 1", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
4158 	{ .name = "MG PLL 2", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
4159 	{ .name = "MG PLL 3", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
4160 	{ .name = "MG PLL 4", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
4161 	{}
4162 };
4163 
4164 static const struct intel_dpll_mgr icl_pll_mgr = {
4165 	.dpll_info = icl_plls,
4166 	.compute_dplls = icl_compute_dplls,
4167 	.get_dplls = icl_get_dplls,
4168 	.put_dplls = icl_put_dplls,
4169 	.update_active_dpll = icl_update_active_dpll,
4170 	.update_ref_clks = icl_update_dpll_ref_clks,
4171 	.dump_hw_state = icl_dump_hw_state,
4172 	.compare_hw_state = icl_compare_hw_state,
4173 };
4174 
4175 static const struct dpll_info ehl_plls[] = {
4176 	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4177 	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4178 	{ .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4,
4179 	  .power_domain = POWER_DOMAIN_DC_OFF, },
4180 	{}
4181 };
4182 
4183 static const struct intel_dpll_mgr ehl_pll_mgr = {
4184 	.dpll_info = ehl_plls,
4185 	.compute_dplls = icl_compute_dplls,
4186 	.get_dplls = icl_get_dplls,
4187 	.put_dplls = icl_put_dplls,
4188 	.update_ref_clks = icl_update_dpll_ref_clks,
4189 	.dump_hw_state = icl_dump_hw_state,
4190 	.compare_hw_state = icl_compare_hw_state,
4191 };
4192 
4193 static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
4194 	.enable = mg_pll_enable,
4195 	.disable = mg_pll_disable,
4196 	.get_hw_state = dkl_pll_get_hw_state,
4197 	.get_freq = icl_ddi_mg_pll_get_freq,
4198 };
4199 
4200 static const struct dpll_info tgl_plls[] = {
4201 	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4202 	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4203 	{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
4204 	  .is_alt_port_dpll = true, },
4205 	{ .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
4206 	{ .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
4207 	{ .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
4208 	{ .name = "TC PLL 4", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
4209 	{ .name = "TC PLL 5", .funcs = &dkl_pll_funcs, .id = DPLL_ID_TGL_MGPLL5, },
4210 	{ .name = "TC PLL 6", .funcs = &dkl_pll_funcs, .id = DPLL_ID_TGL_MGPLL6, },
4211 	{}
4212 };
4213 
4214 static const struct intel_dpll_mgr tgl_pll_mgr = {
4215 	.dpll_info = tgl_plls,
4216 	.compute_dplls = icl_compute_dplls,
4217 	.get_dplls = icl_get_dplls,
4218 	.put_dplls = icl_put_dplls,
4219 	.update_active_dpll = icl_update_active_dpll,
4220 	.update_ref_clks = icl_update_dpll_ref_clks,
4221 	.dump_hw_state = icl_dump_hw_state,
4222 	.compare_hw_state = icl_compare_hw_state,
4223 };
4224 
4225 static const struct dpll_info rkl_plls[] = {
4226 	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4227 	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4228 	{ .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4, },
4229 	{}
4230 };
4231 
4232 static const struct intel_dpll_mgr rkl_pll_mgr = {
4233 	.dpll_info = rkl_plls,
4234 	.compute_dplls = icl_compute_dplls,
4235 	.get_dplls = icl_get_dplls,
4236 	.put_dplls = icl_put_dplls,
4237 	.update_ref_clks = icl_update_dpll_ref_clks,
4238 	.dump_hw_state = icl_dump_hw_state,
4239 	.compare_hw_state = icl_compare_hw_state,
4240 };
4241 
4242 static const struct dpll_info dg1_plls[] = {
4243 	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL0, },
4244 	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL1, },
4245 	{ .name = "DPLL 2", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL2, },
4246 	{ .name = "DPLL 3", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL3, },
4247 	{}
4248 };
4249 
4250 static const struct intel_dpll_mgr dg1_pll_mgr = {
4251 	.dpll_info = dg1_plls,
4252 	.compute_dplls = icl_compute_dplls,
4253 	.get_dplls = icl_get_dplls,
4254 	.put_dplls = icl_put_dplls,
4255 	.update_ref_clks = icl_update_dpll_ref_clks,
4256 	.dump_hw_state = icl_dump_hw_state,
4257 	.compare_hw_state = icl_compare_hw_state,
4258 };
4259 
4260 static const struct dpll_info adls_plls[] = {
4261 	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4262 	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4263 	{ .name = "DPLL 2", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL2, },
4264 	{ .name = "DPLL 3", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL3, },
4265 	{}
4266 };
4267 
4268 static const struct intel_dpll_mgr adls_pll_mgr = {
4269 	.dpll_info = adls_plls,
4270 	.compute_dplls = icl_compute_dplls,
4271 	.get_dplls = icl_get_dplls,
4272 	.put_dplls = icl_put_dplls,
4273 	.update_ref_clks = icl_update_dpll_ref_clks,
4274 	.dump_hw_state = icl_dump_hw_state,
4275 	.compare_hw_state = icl_compare_hw_state,
4276 };
4277 
4278 static const struct dpll_info adlp_plls[] = {
4279 	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4280 	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4281 	{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
4282 	  .is_alt_port_dpll = true, },
4283 	{ .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
4284 	{ .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
4285 	{ .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
4286 	{ .name = "TC PLL 4", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
4287 	{}
4288 };
4289 
4290 static const struct intel_dpll_mgr adlp_pll_mgr = {
4291 	.dpll_info = adlp_plls,
4292 	.compute_dplls = icl_compute_dplls,
4293 	.get_dplls = icl_get_dplls,
4294 	.put_dplls = icl_put_dplls,
4295 	.update_active_dpll = icl_update_active_dpll,
4296 	.update_ref_clks = icl_update_dpll_ref_clks,
4297 	.dump_hw_state = icl_dump_hw_state,
4298 	.compare_hw_state = icl_compare_hw_state,
4299 };
4300 
4301 /**
4302  * intel_shared_dpll_init - Initialize shared DPLLs
4303  * @i915: i915 device
4304  *
4305  * Initialize shared DPLLs for @i915.
4306  */
4307 void intel_shared_dpll_init(struct drm_i915_private *i915)
4308 {
4309 	const struct intel_dpll_mgr *dpll_mgr = NULL;
4310 	const struct dpll_info *dpll_info;
4311 	int i;
4312 
4313 	mutex_init(&i915->display.dpll.lock);
4314 
4315 	if (DISPLAY_VER(i915) >= 14 || IS_DG2(i915))
4316 		/* No shared DPLLs on DG2; port PLLs are part of the PHY */
4317 		dpll_mgr = NULL;
4318 	else if (IS_ALDERLAKE_P(i915))
4319 		dpll_mgr = &adlp_pll_mgr;
4320 	else if (IS_ALDERLAKE_S(i915))
4321 		dpll_mgr = &adls_pll_mgr;
4322 	else if (IS_DG1(i915))
4323 		dpll_mgr = &dg1_pll_mgr;
4324 	else if (IS_ROCKETLAKE(i915))
4325 		dpll_mgr = &rkl_pll_mgr;
4326 	else if (DISPLAY_VER(i915) >= 12)
4327 		dpll_mgr = &tgl_pll_mgr;
4328 	else if (IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915))
4329 		dpll_mgr = &ehl_pll_mgr;
4330 	else if (DISPLAY_VER(i915) >= 11)
4331 		dpll_mgr = &icl_pll_mgr;
4332 	else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
4333 		dpll_mgr = &bxt_pll_mgr;
4334 	else if (DISPLAY_VER(i915) == 9)
4335 		dpll_mgr = &skl_pll_mgr;
4336 	else if (HAS_DDI(i915))
4337 		dpll_mgr = &hsw_pll_mgr;
4338 	else if (HAS_PCH_IBX(i915) || HAS_PCH_CPT(i915))
4339 		dpll_mgr = &pch_pll_mgr;
4340 
4341 	if (!dpll_mgr)
4342 		return;
4343 
4344 	dpll_info = dpll_mgr->dpll_info;
4345 
4346 	for (i = 0; dpll_info[i].name; i++) {
4347 		if (drm_WARN_ON(&i915->drm,
4348 				i >= ARRAY_SIZE(i915->display.dpll.shared_dplls)))
4349 			break;
4350 
4351 		/* must fit into unsigned long bitmask on 32bit */
4352 		if (drm_WARN_ON(&i915->drm, dpll_info[i].id >= 32))
4353 			break;
4354 
4355 		i915->display.dpll.shared_dplls[i].info = &dpll_info[i];
4356 		i915->display.dpll.shared_dplls[i].index = i;
4357 	}
4358 
4359 	i915->display.dpll.mgr = dpll_mgr;
4360 	i915->display.dpll.num_shared_dpll = i;
4361 }
4362 
4363 /**
4364  * intel_compute_shared_dplls - compute DPLL state for a CRTC and encoder combination
4365  * @state: atomic state
4366  * @crtc: CRTC to compute DPLLs for
4367  * @encoder: encoder
4368  *
4369  * This function computes the DPLL state for the given CRTC and encoder.
4370  *
4371  * The new configuration in the atomic commit @state is made effective by
4372  * calling intel_shared_dpll_swap_state().
4373  *
4374  * Returns:
4375  * 0 on success, negative error code on failure.
4376  */
4377 int intel_compute_shared_dplls(struct intel_atomic_state *state,
4378 			       struct intel_crtc *crtc,
4379 			       struct intel_encoder *encoder)
4380 {
4381 	struct drm_i915_private *i915 = to_i915(state->base.dev);
4382 	const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4383 
4384 	if (drm_WARN_ON(&i915->drm, !dpll_mgr))
4385 		return -EINVAL;
4386 
4387 	return dpll_mgr->compute_dplls(state, crtc, encoder);
4388 }
4389 
4390 /**
4391  * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
4392  * @state: atomic state
4393  * @crtc: CRTC to reserve DPLLs for
4394  * @encoder: encoder
4395  *
4396  * This function reserves all required DPLLs for the given CRTC and encoder
4397  * combination in the current atomic commit @state and the new @crtc atomic
4398  * state.
4399  *
4400  * The new configuration in the atomic commit @state is made effective by
4401  * calling intel_shared_dpll_swap_state().
4402  *
4403  * The reserved DPLLs should be released by calling
4404  * intel_release_shared_dplls().
4405  *
4406  * Returns:
4407  * 0 if all required DPLLs were successfully reserved,
4408  * negative error code otherwise.
4409  */
4410 int intel_reserve_shared_dplls(struct intel_atomic_state *state,
4411 			       struct intel_crtc *crtc,
4412 			       struct intel_encoder *encoder)
4413 {
4414 	struct drm_i915_private *i915 = to_i915(state->base.dev);
4415 	const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4416 
4417 	if (drm_WARN_ON(&i915->drm, !dpll_mgr))
4418 		return -EINVAL;
4419 
4420 	return dpll_mgr->get_dplls(state, crtc, encoder);
4421 }
4422 
4423 /**
4424  * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
4425  * @state: atomic state
4426  * @crtc: crtc from which the DPLLs are to be released
4427  *
4428  * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
4429  * from the current atomic commit @state and the old @crtc atomic state.
4430  *
4431  * The new configuration in the atomic commit @state is made effective by
4432  * calling intel_shared_dpll_swap_state().
4433  */
4434 void intel_release_shared_dplls(struct intel_atomic_state *state,
4435 				struct intel_crtc *crtc)
4436 {
4437 	struct drm_i915_private *i915 = to_i915(state->base.dev);
4438 	const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4439 
4440 	/*
4441 	 * FIXME: this function is called for every platform having a
4442 	 * compute_clock hook, even though the platform doesn't yet support
4443 	 * the shared DPLL framework and intel_reserve_shared_dplls() is not
4444 	 * called on those.
4445 	 */
4446 	if (!dpll_mgr)
4447 		return;
4448 
4449 	dpll_mgr->put_dplls(state, crtc);
4450 }
4451 
4452 /**
4453  * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
4454  * @state: atomic state
4455  * @crtc: the CRTC for which to update the active DPLL
4456  * @encoder: encoder determining the type of port DPLL
4457  *
4458  * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
4459  * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
4460  * DPLL selected will be based on the current mode of the encoder's port.
4461  */
4462 void intel_update_active_dpll(struct intel_atomic_state *state,
4463 			      struct intel_crtc *crtc,
4464 			      struct intel_encoder *encoder)
4465 {
4466 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
4467 	const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4468 
4469 	if (drm_WARN_ON(&i915->drm, !dpll_mgr))
4470 		return;
4471 
4472 	dpll_mgr->update_active_dpll(state, crtc, encoder);
4473 }
4474 
4475 /**
4476  * intel_dpll_get_freq - calculate the DPLL's output frequency
4477  * @i915: i915 device
4478  * @pll: DPLL for which to calculate the output frequency
4479  * @dpll_hw_state: DPLL state from which to calculate the output frequency
4480  *
4481  * Return the output frequency corresponding to @pll's passed-in @dpll_hw_state.
4482  */
4483 int intel_dpll_get_freq(struct drm_i915_private *i915,
4484 			const struct intel_shared_dpll *pll,
4485 			const struct intel_dpll_hw_state *dpll_hw_state)
4486 {
4487 	if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
4488 		return 0;
4489 
4490 	return pll->info->funcs->get_freq(i915, pll, dpll_hw_state);
4491 }
4492 
4493 /**
4494  * intel_dpll_get_hw_state - read out the DPLL's hardware state
4495  * @i915: i915 device
4496  * @pll: DPLL for which to read out the hardware state
4497  * @dpll_hw_state: DPLL's hardware state
4498  *
4499  * Read out @pll's hardware state into @dpll_hw_state.
4500  */
4501 bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
4502 			     struct intel_shared_dpll *pll,
4503 			     struct intel_dpll_hw_state *dpll_hw_state)
4504 {
4505 	return pll->info->funcs->get_hw_state(i915, pll, dpll_hw_state);
4506 }
4507 
4508 static void readout_dpll_hw_state(struct drm_i915_private *i915,
4509 				  struct intel_shared_dpll *pll)
4510 {
4511 	struct intel_crtc *crtc;
4512 
4513 	pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state);
4514 
4515 	if (pll->on && pll->info->power_domain)
4516 		pll->wakeref = intel_display_power_get(i915, pll->info->power_domain);
4517 
4518 	pll->state.pipe_mask = 0;
4519 	for_each_intel_crtc(&i915->drm, crtc) {
4520 		struct intel_crtc_state *crtc_state =
4521 			to_intel_crtc_state(crtc->base.state);
4522 
4523 		if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
4524 			intel_reference_shared_dpll_crtc(crtc, pll, &pll->state);
4525 	}
4526 	pll->active_mask = pll->state.pipe_mask;
4527 
4528 	drm_dbg_kms(&i915->drm,
4529 		    "%s hw state readout: pipe_mask 0x%x, on %i\n",
4530 		    pll->info->name, pll->state.pipe_mask, pll->on);
4531 }
4532 
4533 void intel_dpll_update_ref_clks(struct drm_i915_private *i915)
4534 {
4535 	if (i915->display.dpll.mgr && i915->display.dpll.mgr->update_ref_clks)
4536 		i915->display.dpll.mgr->update_ref_clks(i915);
4537 }
4538 
4539 void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
4540 {
4541 	struct intel_shared_dpll *pll;
4542 	int i;
4543 
4544 	for_each_shared_dpll(i915, pll, i)
4545 		readout_dpll_hw_state(i915, pll);
4546 }
4547 
4548 static void sanitize_dpll_state(struct drm_i915_private *i915,
4549 				struct intel_shared_dpll *pll)
4550 {
4551 	if (!pll->on)
4552 		return;
4553 
4554 	adlp_cmtg_clock_gating_wa(i915, pll);
4555 
4556 	if (pll->active_mask)
4557 		return;
4558 
4559 	drm_dbg_kms(&i915->drm,
4560 		    "%s enabled but not in use, disabling\n",
4561 		    pll->info->name);
4562 
4563 	_intel_disable_shared_dpll(i915, pll);
4564 }
4565 
4566 void intel_dpll_sanitize_state(struct drm_i915_private *i915)
4567 {
4568 	struct intel_shared_dpll *pll;
4569 	int i;
4570 
4571 	for_each_shared_dpll(i915, pll, i)
4572 		sanitize_dpll_state(i915, pll);
4573 }
4574 
4575 /**
4576  * intel_dpll_dump_hw_state - dump hw_state
4577  * @i915: i915 drm device
4578  * @p: where to print the state to
4579  * @dpll_hw_state: hw state to be dumped
4580  *
4581  * Dump out the relevant values in @dpll_hw_state.
4582  */
4583 void intel_dpll_dump_hw_state(struct drm_i915_private *i915,
4584 			      struct drm_printer *p,
4585 			      const struct intel_dpll_hw_state *dpll_hw_state)
4586 {
4587 	if (i915->display.dpll.mgr) {
4588 		i915->display.dpll.mgr->dump_hw_state(p, dpll_hw_state);
4589 	} else {
4590 		/* fallback for platforms that don't use the shared dpll
4591 		 * infrastructure
4592 		 */
4593 		ibx_dump_hw_state(p, dpll_hw_state);
4594 	}
4595 }
4596 
4597 /**
4598  * intel_dpll_compare_hw_state - compare the two states
4599  * @i915: i915 drm device
4600  * @a: first DPLL hw state
4601  * @b: second DPLL hw state
4602  *
4603  * Compare DPLL hw states @a and @b.
4604  *
4605  * Returns: true if the states are equal, false if they differ
4606  */
4607 bool intel_dpll_compare_hw_state(struct drm_i915_private *i915,
4608 				 const struct intel_dpll_hw_state *a,
4609 				 const struct intel_dpll_hw_state *b)
4610 {
4611 	if (i915->display.dpll.mgr) {
4612 		return i915->display.dpll.mgr->compare_hw_state(a, b);
4613 	} else {
4614 		/* fallback for platforms that don't use the shared dpll
4615 		 * infrastructure
4616 		 */
4617 		return ibx_compare_hw_state(a, b);
4618 	}
4619 }
4620 
4621 static void
4622 verify_single_dpll_state(struct drm_i915_private *i915,
4623 			 struct intel_shared_dpll *pll,
4624 			 struct intel_crtc *crtc,
4625 			 const struct intel_crtc_state *new_crtc_state)
4626 {
4627 	struct intel_display *display = &i915->display;
4628 	struct intel_dpll_hw_state dpll_hw_state = {};
4629 	u8 pipe_mask;
4630 	bool active;
4631 
4632 	active = intel_dpll_get_hw_state(i915, pll, &dpll_hw_state);
4633 
4634 	if (!pll->info->always_on) {
4635 		INTEL_DISPLAY_STATE_WARN(display, !pll->on && pll->active_mask,
4636 					 "%s: pll in active use but not on in sw tracking\n",
4637 					 pll->info->name);
4638 		INTEL_DISPLAY_STATE_WARN(display, pll->on && !pll->active_mask,
4639 					 "%s: pll is on but not used by any active pipe\n",
4640 					 pll->info->name);
4641 		INTEL_DISPLAY_STATE_WARN(display, pll->on != active,
4642 					 "%s: pll on state mismatch (expected %i, found %i)\n",
4643 					 pll->info->name, pll->on, active);
4644 	}
4645 
4646 	if (!crtc) {
4647 		INTEL_DISPLAY_STATE_WARN(display,
4648 					 pll->active_mask & ~pll->state.pipe_mask,
4649 					 "%s: more active pll users than references: 0x%x vs 0x%x\n",
4650 					 pll->info->name, pll->active_mask, pll->state.pipe_mask);
4651 
4652 		return;
4653 	}
4654 
4655 	pipe_mask = BIT(crtc->pipe);
4656 
4657 	if (new_crtc_state->hw.active)
4658 		INTEL_DISPLAY_STATE_WARN(display, !(pll->active_mask & pipe_mask),
4659 					 "%s: pll active mismatch (expected pipe %c in active mask 0x%x)\n",
4660 					 pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
4661 	else
4662 		INTEL_DISPLAY_STATE_WARN(display, pll->active_mask & pipe_mask,
4663 					 "%s: pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
4664 					 pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
4665 
4666 	INTEL_DISPLAY_STATE_WARN(display, !(pll->state.pipe_mask & pipe_mask),
4667 				 "%s: pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
4668 				 pll->info->name, pipe_mask, pll->state.pipe_mask);
4669 
4670 	INTEL_DISPLAY_STATE_WARN(display,
4671 				 pll->on && memcmp(&pll->state.hw_state, &dpll_hw_state,
4672 						   sizeof(dpll_hw_state)),
4673 				 "%s: pll hw state mismatch\n",
4674 				 pll->info->name);
4675 }
4676 
static bool has_alt_port_dpll(const struct intel_shared_dpll *old_pll,
			      const struct intel_shared_dpll *new_pll)
{
	return old_pll && new_pll && old_pll != new_pll &&
		(old_pll->info->is_alt_port_dpll || new_pll->info->is_alt_port_dpll);
}

void intel_shared_dpll_state_verify(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (new_crtc_state->shared_dpll)
		verify_single_dpll_state(i915, new_crtc_state->shared_dpll,
					 crtc, new_crtc_state);

	if (old_crtc_state->shared_dpll &&
	    old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
		u8 pipe_mask = BIT(crtc->pipe);
		struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;

		INTEL_DISPLAY_STATE_WARN(display, pll->active_mask & pipe_mask,
					 "%s: pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
					 pll->info->name, pipe_name(crtc->pipe), pll->active_mask);

		/* TC ports have both MG/TC and TBT PLL referenced simultaneously */
		INTEL_DISPLAY_STATE_WARN(display, !has_alt_port_dpll(old_crtc_state->shared_dpll,
								     new_crtc_state->shared_dpll) &&
					 pll->state.pipe_mask & pipe_mask,
					 "%s: pll enabled crtcs mismatch (found pipe %c in enabled mask (0x%x))\n",
					 pll->info->name, pipe_name(crtc->pipe), pll->state.pipe_mask);
	}
}

void intel_shared_dpll_verify_disabled(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_shared_dpll *pll;
	int i;

	for_each_shared_dpll(i915, pll, i)
		verify_single_dpll_state(i915, pll, NULL, NULL);
}