xref: /linux/drivers/gpu/drm/i915/display/intel_dpll_mgr.c (revision 3fd6c59042dbba50391e30862beac979491145fe)
1 /*
2  * Copyright © 2006-2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <linux/math.h>
25 #include <linux/string_helpers.h>
26 
27 #include "bxt_dpio_phy_regs.h"
28 #include "i915_reg.h"
29 #include "intel_de.h"
30 #include "intel_display_types.h"
31 #include "intel_dkl_phy.h"
32 #include "intel_dkl_phy_regs.h"
33 #include "intel_dpio_phy.h"
34 #include "intel_dpll.h"
35 #include "intel_dpll_mgr.h"
36 #include "intel_hti.h"
37 #include "intel_mg_phy_regs.h"
38 #include "intel_pch_refclk.h"
39 #include "intel_tc.h"
40 
41 /**
42  * DOC: Display PLLs
43  *
44  * Display PLLs used for driving outputs vary by platform. While some have
45  * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
46  * from a pool. In the latter scenario, it is possible that multiple pipes
47  * share a PLL if their configurations match.
48  *
49  * This file provides an abstraction over display PLLs. The function
50  * intel_shared_dpll_init() initializes the PLLs for the given platform.  The
51  * users of a PLL are tracked and that tracking is integrated with the atomic
52  * modeset interface. During an atomic operation, required PLLs can be reserved
53  * for a given CRTC and encoder configuration by calling
54  * intel_reserve_shared_dplls() and previously reserved PLLs can be released
55  * with intel_release_shared_dplls().
56  * Changes to the users are first staged in the atomic state, and then made
57  * effective by calling intel_shared_dpll_swap_state() during the atomic
58  * commit phase.
59  */
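
/*
 * A minimal sketch of the expected calling sequence (hypothetical caller,
 * shown for illustration only; the real call sites live in the atomic
 * check/commit paths of the display code):
 *
 *	// atomic check: compute and reserve a PLL for the CRTC/encoder
 *	ret = intel_reserve_shared_dplls(state, crtc, encoder);
 *	if (ret)
 *		return ret;
 *
 *	// atomic commit: make the staged PLL state effective
 *	intel_shared_dpll_swap_state(state);
 *
 *	// once the CRTC no longer needs the PLL
 *	intel_release_shared_dplls(state, crtc);
 */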
60 
61 /* platform specific hooks for managing DPLLs */
62 struct intel_shared_dpll_funcs {
63 	/*
64 	 * Hook for enabling the pll, called from intel_enable_shared_dpll() if
65 	 * the pll is not already enabled.
66 	 */
67 	void (*enable)(struct drm_i915_private *i915,
68 		       struct intel_shared_dpll *pll,
69 		       const struct intel_dpll_hw_state *dpll_hw_state);
70 
71 	/*
72 	 * Hook for disabling the pll, called from intel_disable_shared_dpll()
73 	 * only when it is safe to disable the pll, i.e., there are no more
74 	 * tracked users for it.
75 	 */
76 	void (*disable)(struct drm_i915_private *i915,
77 			struct intel_shared_dpll *pll);
78 
79 	/*
80 	 * Hook for reading the values currently programmed to the DPLL
81 	 * registers. This is used for initial hw state readout and state
82 	 * verification after a mode set.
83 	 */
84 	bool (*get_hw_state)(struct drm_i915_private *i915,
85 			     struct intel_shared_dpll *pll,
86 			     struct intel_dpll_hw_state *dpll_hw_state);
87 
88 	/*
89 	 * Hook for calculating the pll's output frequency based on its passed
90 	 * in state.
91 	 */
92 	int (*get_freq)(struct drm_i915_private *i915,
93 			const struct intel_shared_dpll *pll,
94 			const struct intel_dpll_hw_state *dpll_hw_state);
95 };
96 
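/*
 * Per-platform table of DPLL management hooks; each supported platform
 * provides an instance of this below (e.g. pch_pll_mgr, hsw_pll_mgr).
 */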
97 struct intel_dpll_mgr {
98 	const struct dpll_info *dpll_info;
99 
100 	int (*compute_dplls)(struct intel_atomic_state *state,
101 			     struct intel_crtc *crtc,
102 			     struct intel_encoder *encoder);
103 	int (*get_dplls)(struct intel_atomic_state *state,
104 			 struct intel_crtc *crtc,
105 			 struct intel_encoder *encoder);
106 	void (*put_dplls)(struct intel_atomic_state *state,
107 			  struct intel_crtc *crtc);
108 	void (*update_active_dpll)(struct intel_atomic_state *state,
109 				   struct intel_crtc *crtc,
110 				   struct intel_encoder *encoder);
111 	void (*update_ref_clks)(struct drm_i915_private *i915);
112 	void (*dump_hw_state)(struct drm_printer *p,
113 			      const struct intel_dpll_hw_state *dpll_hw_state);
114 	bool (*compare_hw_state)(const struct intel_dpll_hw_state *a,
115 				 const struct intel_dpll_hw_state *b);
116 };
117 
118 static void
119 intel_atomic_duplicate_dpll_state(struct drm_i915_private *i915,
120 				  struct intel_shared_dpll_state *shared_dpll)
121 {
122 	struct intel_shared_dpll *pll;
123 	int i;
124 
125 	/* Copy shared dpll state */
126 	for_each_shared_dpll(i915, pll, i)
127 		shared_dpll[pll->index] = pll->state;
128 }
129 
130 static struct intel_shared_dpll_state *
131 intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
132 {
133 	struct intel_atomic_state *state = to_intel_atomic_state(s);
134 
135 	drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
136 
137 	if (!state->dpll_set) {
138 		state->dpll_set = true;
139 
140 		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
141 						  state->shared_dpll);
142 	}
143 
144 	return state->shared_dpll;
145 }
146 
147 /**
148  * intel_get_shared_dpll_by_id - get a DPLL given its id
149  * @i915: i915 device instance
150  * @id: pll id
151  *
152  * Returns:
153  * A pointer to the DPLL with @id, or NULL if it is not found.
154  */
155 struct intel_shared_dpll *
156 intel_get_shared_dpll_by_id(struct drm_i915_private *i915,
157 			    enum intel_dpll_id id)
158 {
159 	struct intel_shared_dpll *pll;
160 	int i;
161 
162 	for_each_shared_dpll(i915, pll, i) {
163 		if (pll->info->id == id)
164 			return pll;
165 	}
166 
167 	MISSING_CASE(id);
168 	return NULL;
169 }
170 
171 /* For ILK+ */
172 void assert_shared_dpll(struct drm_i915_private *i915,
173 			struct intel_shared_dpll *pll,
174 			bool state)
175 {
176 	struct intel_display *display = &i915->display;
177 	bool cur_state;
178 	struct intel_dpll_hw_state hw_state;
179 
180 	if (drm_WARN(display->drm, !pll,
181 		     "asserting DPLL %s with no DPLL\n", str_on_off(state)))
182 		return;
183 
184 	cur_state = intel_dpll_get_hw_state(i915, pll, &hw_state);
185 	INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
186 				 "%s assertion failure (expected %s, current %s)\n",
187 				 pll->info->name, str_on_off(state),
188 				 str_on_off(cur_state));
189 }
190 
191 static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
192 {
193 	return TC_PORT_1 + id - DPLL_ID_ICL_MGPLL1;
194 }
195 
196 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
197 {
198 	return tc_port - TC_PORT_1 + DPLL_ID_ICL_MGPLL1;
199 }
200 
201 static i915_reg_t
202 intel_combo_pll_enable_reg(struct drm_i915_private *i915,
203 			   struct intel_shared_dpll *pll)
204 {
205 	if (IS_DG1(i915))
206 		return DG1_DPLL_ENABLE(pll->info->id);
207 	else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
208 		 (pll->info->id == DPLL_ID_EHL_DPLL4))
209 		return MG_PLL_ENABLE(0);
210 
211 	return ICL_DPLL_ENABLE(pll->info->id);
212 }
213 
214 static i915_reg_t
215 intel_tc_pll_enable_reg(struct drm_i915_private *i915,
216 			struct intel_shared_dpll *pll)
217 {
218 	const enum intel_dpll_id id = pll->info->id;
219 	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
220 
221 	if (IS_ALDERLAKE_P(i915))
222 		return ADLP_PORTTC_PLL_ENABLE(tc_port);
223 
224 	return MG_PLL_ENABLE(tc_port);
225 }
226 
227 static void _intel_enable_shared_dpll(struct drm_i915_private *i915,
228 				      struct intel_shared_dpll *pll)
229 {
230 	if (pll->info->power_domain)
231 		pll->wakeref = intel_display_power_get(i915, pll->info->power_domain);
232 
233 	pll->info->funcs->enable(i915, pll, &pll->state.hw_state);
234 	pll->on = true;
235 }
236 
237 static void _intel_disable_shared_dpll(struct drm_i915_private *i915,
238 				       struct intel_shared_dpll *pll)
239 {
240 	pll->info->funcs->disable(i915, pll);
241 	pll->on = false;
242 
243 	if (pll->info->power_domain)
244 		intel_display_power_put(i915, pll->info->power_domain, pll->wakeref);
245 }
246 
247 /**
248  * intel_enable_shared_dpll - enable a CRTC's shared DPLL
249  * @crtc_state: CRTC, and its state, which has a shared DPLL
250  *
251  * Enable the shared DPLL used by the CRTC in @crtc_state.
252  */
253 void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
254 {
255 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
256 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
257 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
258 	unsigned int pipe_mask = BIT(crtc->pipe);
259 	unsigned int old_mask;
260 
261 	if (drm_WARN_ON(&i915->drm, pll == NULL))
262 		return;
263 
264 	mutex_lock(&i915->display.dpll.lock);
265 	old_mask = pll->active_mask;
266 
267 	if (drm_WARN_ON(&i915->drm, !(pll->state.pipe_mask & pipe_mask)) ||
268 	    drm_WARN_ON(&i915->drm, pll->active_mask & pipe_mask))
269 		goto out;
270 
271 	pll->active_mask |= pipe_mask;
272 
273 	drm_dbg_kms(&i915->drm,
274 		    "enable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
275 		    pll->info->name, pll->active_mask, pll->on,
276 		    crtc->base.base.id, crtc->base.name);
277 
278 	if (old_mask) {
279 		drm_WARN_ON(&i915->drm, !pll->on);
280 		assert_shared_dpll_enabled(i915, pll);
281 		goto out;
282 	}
283 	drm_WARN_ON(&i915->drm, pll->on);
284 
285 	drm_dbg_kms(&i915->drm, "enabling %s\n", pll->info->name);
286 
287 	_intel_enable_shared_dpll(i915, pll);
288 
289 out:
290 	mutex_unlock(&i915->display.dpll.lock);
291 }
292 
293 /**
294  * intel_disable_shared_dpll - disable a CRTC's shared DPLL
295  * @crtc_state: CRTC, and its state, which has a shared DPLL
296  *
297  * Disable the shared DPLL used by the CRTC in @crtc_state.
298  */
299 void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
300 {
301 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
302 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
303 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
304 	unsigned int pipe_mask = BIT(crtc->pipe);
305 
306 	/* PCH only available on ILK+ */
307 	if (DISPLAY_VER(i915) < 5)
308 		return;
309 
310 	if (pll == NULL)
311 		return;
312 
313 	mutex_lock(&i915->display.dpll.lock);
314 	if (drm_WARN(&i915->drm, !(pll->active_mask & pipe_mask),
315 		     "%s not used by [CRTC:%d:%s]\n", pll->info->name,
316 		     crtc->base.base.id, crtc->base.name))
317 		goto out;
318 
319 	drm_dbg_kms(&i915->drm,
320 		    "disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
321 		    pll->info->name, pll->active_mask, pll->on,
322 		    crtc->base.base.id, crtc->base.name);
323 
324 	assert_shared_dpll_enabled(i915, pll);
325 	drm_WARN_ON(&i915->drm, !pll->on);
326 
327 	pll->active_mask &= ~pipe_mask;
328 	if (pll->active_mask)
329 		goto out;
330 
331 	drm_dbg_kms(&i915->drm, "disabling %s\n", pll->info->name);
332 
333 	_intel_disable_shared_dpll(i915, pll);
334 
335 out:
336 	mutex_unlock(&i915->display.dpll.lock);
337 }
338 
339 static unsigned long
340 intel_dpll_mask_all(struct drm_i915_private *i915)
341 {
342 	struct intel_shared_dpll *pll;
343 	unsigned long dpll_mask = 0;
344 	int i;
345 
346 	for_each_shared_dpll(i915, pll, i) {
347 		drm_WARN_ON(&i915->drm, dpll_mask & BIT(pll->info->id));
348 
349 		dpll_mask |= BIT(pll->info->id);
350 	}
351 
352 	return dpll_mask;
353 }
354 
355 static struct intel_shared_dpll *
356 intel_find_shared_dpll(struct intel_atomic_state *state,
357 		       const struct intel_crtc *crtc,
358 		       const struct intel_dpll_hw_state *dpll_hw_state,
359 		       unsigned long dpll_mask)
360 {
361 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
362 	unsigned long dpll_mask_all = intel_dpll_mask_all(i915);
363 	struct intel_shared_dpll_state *shared_dpll;
364 	struct intel_shared_dpll *unused_pll = NULL;
365 	enum intel_dpll_id id;
366 
367 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
368 
369 	drm_WARN_ON(&i915->drm, dpll_mask & ~dpll_mask_all);
370 
371 	for_each_set_bit(id, &dpll_mask, fls(dpll_mask_all)) {
372 		struct intel_shared_dpll *pll;
373 
374 		pll = intel_get_shared_dpll_by_id(i915, id);
375 		if (!pll)
376 			continue;
377 
378 		/* Only want to check enabled timings first */
379 		if (shared_dpll[pll->index].pipe_mask == 0) {
380 			if (!unused_pll)
381 				unused_pll = pll;
382 			continue;
383 		}
384 
385 		if (memcmp(dpll_hw_state,
386 			   &shared_dpll[pll->index].hw_state,
387 			   sizeof(*dpll_hw_state)) == 0) {
388 			drm_dbg_kms(&i915->drm,
389 				    "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
390 				    crtc->base.base.id, crtc->base.name,
391 				    pll->info->name,
392 				    shared_dpll[pll->index].pipe_mask,
393 				    pll->active_mask);
394 			return pll;
395 		}
396 	}
397 
398 	/* Ok no matching timings, maybe there's a free one? */
399 	if (unused_pll) {
400 		drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] allocated %s\n",
401 			    crtc->base.base.id, crtc->base.name,
402 			    unused_pll->info->name);
403 		return unused_pll;
404 	}
405 
406 	return NULL;
407 }
408 
409 /**
410  * intel_reference_shared_dpll_crtc - Get a DPLL reference for a CRTC
411  * @crtc: CRTC on which behalf the reference is taken
412  * @pll: DPLL for which the reference is taken
413  * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
414  *
415  * Take a reference for @pll tracking the use of it by @crtc.
416  */
417 static void
418 intel_reference_shared_dpll_crtc(const struct intel_crtc *crtc,
419 				 const struct intel_shared_dpll *pll,
420 				 struct intel_shared_dpll_state *shared_dpll_state)
421 {
422 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
423 
424 	drm_WARN_ON(&i915->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) != 0);
425 
426 	shared_dpll_state->pipe_mask |= BIT(crtc->pipe);
427 
428 	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] reserving %s\n",
429 		    crtc->base.base.id, crtc->base.name, pll->info->name);
430 }
431 
432 static void
433 intel_reference_shared_dpll(struct intel_atomic_state *state,
434 			    const struct intel_crtc *crtc,
435 			    const struct intel_shared_dpll *pll,
436 			    const struct intel_dpll_hw_state *dpll_hw_state)
437 {
438 	struct intel_shared_dpll_state *shared_dpll;
439 
440 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
441 
442 	if (shared_dpll[pll->index].pipe_mask == 0)
443 		shared_dpll[pll->index].hw_state = *dpll_hw_state;
444 
445 	intel_reference_shared_dpll_crtc(crtc, pll, &shared_dpll[pll->index]);
446 }
447 
448 /**
449  * intel_unreference_shared_dpll_crtc - Drop a DPLL reference for a CRTC
450  * @crtc: CRTC on which behalf the reference is dropped
451  * @pll: DPLL for which the reference is dropped
452  * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
453  *
454  * Drop a reference for @pll tracking the end of use of it by @crtc.
455  */
456 void
457 intel_unreference_shared_dpll_crtc(const struct intel_crtc *crtc,
458 				   const struct intel_shared_dpll *pll,
459 				   struct intel_shared_dpll_state *shared_dpll_state)
460 {
461 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
462 
463 	drm_WARN_ON(&i915->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) == 0);
464 
465 	shared_dpll_state->pipe_mask &= ~BIT(crtc->pipe);
466 
467 	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] releasing %s\n",
468 		    crtc->base.base.id, crtc->base.name, pll->info->name);
469 }
470 
471 static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
472 					  const struct intel_crtc *crtc,
473 					  const struct intel_shared_dpll *pll)
474 {
475 	struct intel_shared_dpll_state *shared_dpll;
476 
477 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
478 
479 	intel_unreference_shared_dpll_crtc(crtc, pll, &shared_dpll[pll->index]);
480 }
481 
482 static void intel_put_dpll(struct intel_atomic_state *state,
483 			   struct intel_crtc *crtc)
484 {
485 	const struct intel_crtc_state *old_crtc_state =
486 		intel_atomic_get_old_crtc_state(state, crtc);
487 	struct intel_crtc_state *new_crtc_state =
488 		intel_atomic_get_new_crtc_state(state, crtc);
489 
490 	new_crtc_state->shared_dpll = NULL;
491 
492 	if (!old_crtc_state->shared_dpll)
493 		return;
494 
495 	intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
496 }
497 
498 /**
499  * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
500  * @state: atomic state
501  *
502  * This is the dpll version of drm_atomic_helper_swap_state() since the
503  * helper does not handle driver-specific global state.
504  *
505  * For consistency with atomic helpers this function does a complete swap,
506  * i.e. it also puts the current state into @state, even though there is no
507  * need for that at this moment.
508  */
509 void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
510 {
511 	struct drm_i915_private *i915 = to_i915(state->base.dev);
512 	struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
513 	struct intel_shared_dpll *pll;
514 	int i;
515 
516 	if (!state->dpll_set)
517 		return;
518 
519 	for_each_shared_dpll(i915, pll, i)
520 		swap(pll->state, shared_dpll[pll->index]);
521 }
522 
523 static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *i915,
524 				      struct intel_shared_dpll *pll,
525 				      struct intel_dpll_hw_state *dpll_hw_state)
526 {
527 	struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
528 	const enum intel_dpll_id id = pll->info->id;
529 	intel_wakeref_t wakeref;
530 	u32 val;
531 
532 	wakeref = intel_display_power_get_if_enabled(i915,
533 						     POWER_DOMAIN_DISPLAY_CORE);
534 	if (!wakeref)
535 		return false;
536 
537 	val = intel_de_read(i915, PCH_DPLL(id));
538 	hw_state->dpll = val;
539 	hw_state->fp0 = intel_de_read(i915, PCH_FP0(id));
540 	hw_state->fp1 = intel_de_read(i915, PCH_FP1(id));
541 
542 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
543 
544 	return val & DPLL_VCO_ENABLE;
545 }
546 
547 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *i915)
548 {
549 	struct intel_display *display = &i915->display;
550 	u32 val;
551 	bool enabled;
552 
553 	val = intel_de_read(display, PCH_DREF_CONTROL);
554 	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
555 			    DREF_SUPERSPREAD_SOURCE_MASK));
556 	INTEL_DISPLAY_STATE_WARN(display, !enabled,
557 				 "PCH refclk assertion failure, should be active but is disabled\n");
558 }
559 
560 static void ibx_pch_dpll_enable(struct drm_i915_private *i915,
561 				struct intel_shared_dpll *pll,
562 				const struct intel_dpll_hw_state *dpll_hw_state)
563 {
564 	const struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
565 	const enum intel_dpll_id id = pll->info->id;
566 
567 	/* PCH refclock must be enabled first */
568 	ibx_assert_pch_refclk_enabled(i915);
569 
570 	intel_de_write(i915, PCH_FP0(id), hw_state->fp0);
571 	intel_de_write(i915, PCH_FP1(id), hw_state->fp1);
572 
573 	intel_de_write(i915, PCH_DPLL(id), hw_state->dpll);
574 
575 	/* Wait for the clocks to stabilize. */
576 	intel_de_posting_read(i915, PCH_DPLL(id));
577 	udelay(150);
578 
579 	/* The pixel multiplier can only be updated once the
580 	 * DPLL is enabled and the clocks are stable.
581 	 *
582 	 * So write it again.
583 	 */
584 	intel_de_write(i915, PCH_DPLL(id), hw_state->dpll);
585 	intel_de_posting_read(i915, PCH_DPLL(id));
586 	udelay(200);
587 }
588 
589 static void ibx_pch_dpll_disable(struct drm_i915_private *i915,
590 				 struct intel_shared_dpll *pll)
591 {
592 	const enum intel_dpll_id id = pll->info->id;
593 
594 	intel_de_write(i915, PCH_DPLL(id), 0);
595 	intel_de_posting_read(i915, PCH_DPLL(id));
596 	udelay(200);
597 }
598 
599 static int ibx_compute_dpll(struct intel_atomic_state *state,
600 			    struct intel_crtc *crtc,
601 			    struct intel_encoder *encoder)
602 {
603 	return 0;
604 }
605 
606 static int ibx_get_dpll(struct intel_atomic_state *state,
607 			struct intel_crtc *crtc,
608 			struct intel_encoder *encoder)
609 {
610 	struct intel_crtc_state *crtc_state =
611 		intel_atomic_get_new_crtc_state(state, crtc);
612 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
613 	struct intel_shared_dpll *pll;
614 	enum intel_dpll_id id;
615 
616 	if (HAS_PCH_IBX(i915)) {
617 		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
618 		id = (enum intel_dpll_id) crtc->pipe;
619 		pll = intel_get_shared_dpll_by_id(i915, id);
620 
621 		drm_dbg_kms(&i915->drm,
622 			    "[CRTC:%d:%s] using pre-allocated %s\n",
623 			    crtc->base.base.id, crtc->base.name,
624 			    pll->info->name);
625 	} else {
626 		pll = intel_find_shared_dpll(state, crtc,
627 					     &crtc_state->dpll_hw_state,
628 					     BIT(DPLL_ID_PCH_PLL_B) |
629 					     BIT(DPLL_ID_PCH_PLL_A));
630 	}
631 
632 	if (!pll)
633 		return -EINVAL;
634 
635 	/* reference the pll */
636 	intel_reference_shared_dpll(state, crtc,
637 				    pll, &crtc_state->dpll_hw_state);
638 
639 	crtc_state->shared_dpll = pll;
640 
641 	return 0;
642 }
643 
644 static void ibx_dump_hw_state(struct drm_printer *p,
645 			      const struct intel_dpll_hw_state *dpll_hw_state)
646 {
647 	const struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
648 
649 	drm_printf(p, "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
650 		   "fp0: 0x%x, fp1: 0x%x\n",
651 		   hw_state->dpll,
652 		   hw_state->dpll_md,
653 		   hw_state->fp0,
654 		   hw_state->fp1);
655 }
656 
657 static bool ibx_compare_hw_state(const struct intel_dpll_hw_state *_a,
658 				 const struct intel_dpll_hw_state *_b)
659 {
660 	const struct i9xx_dpll_hw_state *a = &_a->i9xx;
661 	const struct i9xx_dpll_hw_state *b = &_b->i9xx;
662 
663 	return a->dpll == b->dpll &&
664 		a->dpll_md == b->dpll_md &&
665 		a->fp0 == b->fp0 &&
666 		a->fp1 == b->fp1;
667 }
668 
669 static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
670 	.enable = ibx_pch_dpll_enable,
671 	.disable = ibx_pch_dpll_disable,
672 	.get_hw_state = ibx_pch_dpll_get_hw_state,
673 };
674 
675 static const struct dpll_info pch_plls[] = {
676 	{ .name = "PCH DPLL A", .funcs = &ibx_pch_dpll_funcs, .id = DPLL_ID_PCH_PLL_A, },
677 	{ .name = "PCH DPLL B", .funcs = &ibx_pch_dpll_funcs, .id = DPLL_ID_PCH_PLL_B, },
678 	{}
679 };
680 
681 static const struct intel_dpll_mgr pch_pll_mgr = {
682 	.dpll_info = pch_plls,
683 	.compute_dplls = ibx_compute_dpll,
684 	.get_dplls = ibx_get_dpll,
685 	.put_dplls = intel_put_dpll,
686 	.dump_hw_state = ibx_dump_hw_state,
687 	.compare_hw_state = ibx_compare_hw_state,
688 };
689 
690 static void hsw_ddi_wrpll_enable(struct drm_i915_private *i915,
691 				 struct intel_shared_dpll *pll,
692 				 const struct intel_dpll_hw_state *dpll_hw_state)
693 {
694 	const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
695 	const enum intel_dpll_id id = pll->info->id;
696 
697 	intel_de_write(i915, WRPLL_CTL(id), hw_state->wrpll);
698 	intel_de_posting_read(i915, WRPLL_CTL(id));
699 	udelay(20);
700 }
701 
702 static void hsw_ddi_spll_enable(struct drm_i915_private *i915,
703 				struct intel_shared_dpll *pll,
704 				const struct intel_dpll_hw_state *dpll_hw_state)
705 {
706 	const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
707 
708 	intel_de_write(i915, SPLL_CTL, hw_state->spll);
709 	intel_de_posting_read(i915, SPLL_CTL);
710 	udelay(20);
711 }
712 
713 static void hsw_ddi_wrpll_disable(struct drm_i915_private *i915,
714 				  struct intel_shared_dpll *pll)
715 {
716 	const enum intel_dpll_id id = pll->info->id;
717 
718 	intel_de_rmw(i915, WRPLL_CTL(id), WRPLL_PLL_ENABLE, 0);
719 	intel_de_posting_read(i915, WRPLL_CTL(id));
720 
721 	/*
722 	 * Try to set up the PCH reference clock once all DPLLs
723 	 * that depend on it have been shut down.
724 	 */
725 	if (i915->display.dpll.pch_ssc_use & BIT(id))
726 		intel_init_pch_refclk(i915);
727 }
728 
729 static void hsw_ddi_spll_disable(struct drm_i915_private *i915,
730 				 struct intel_shared_dpll *pll)
731 {
732 	enum intel_dpll_id id = pll->info->id;
733 
734 	intel_de_rmw(i915, SPLL_CTL, SPLL_PLL_ENABLE, 0);
735 	intel_de_posting_read(i915, SPLL_CTL);
736 
737 	/*
738 	 * Try to set up the PCH reference clock once all DPLLs
739 	 * that depend on it have been shut down.
740 	 */
741 	if (i915->display.dpll.pch_ssc_use & BIT(id))
742 		intel_init_pch_refclk(i915);
743 }
744 
745 static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *i915,
746 				       struct intel_shared_dpll *pll,
747 				       struct intel_dpll_hw_state *dpll_hw_state)
748 {
749 	struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
750 	const enum intel_dpll_id id = pll->info->id;
751 	intel_wakeref_t wakeref;
752 	u32 val;
753 
754 	wakeref = intel_display_power_get_if_enabled(i915,
755 						     POWER_DOMAIN_DISPLAY_CORE);
756 	if (!wakeref)
757 		return false;
758 
759 	val = intel_de_read(i915, WRPLL_CTL(id));
760 	hw_state->wrpll = val;
761 
762 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
763 
764 	return val & WRPLL_PLL_ENABLE;
765 }
766 
767 static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *i915,
768 				      struct intel_shared_dpll *pll,
769 				      struct intel_dpll_hw_state *dpll_hw_state)
770 {
771 	struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
772 	intel_wakeref_t wakeref;
773 	u32 val;
774 
775 	wakeref = intel_display_power_get_if_enabled(i915,
776 						     POWER_DOMAIN_DISPLAY_CORE);
777 	if (!wakeref)
778 		return false;
779 
780 	val = intel_de_read(i915, SPLL_CTL);
781 	hw_state->spll = val;
782 
783 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
784 
785 	return val & SPLL_PLL_ENABLE;
786 }
787 
788 #define LC_FREQ 2700
789 #define LC_FREQ_2K U64_C(LC_FREQ * 2000)
790 
791 #define P_MIN 2
792 #define P_MAX 64
793 #define P_INC 2
794 
795 /* Constraints for PLL good behavior */
796 #define REF_MIN 48
797 #define REF_MAX 400
798 #define VCO_MIN 2400
799 #define VCO_MAX 4800
800 
801 struct hsw_wrpll_rnp {
802 	unsigned p, n2, r2;
803 };
804 
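/*
 * Returns the allowed frequency error budget, in ppm, for the given pixel
 * clock (in Hz).  Clocks listed with a budget of 0 must be matched exactly
 * by the WRPLL dividers; see hsw_wrpll_update_rnp() for how the budget is
 * applied.
 */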
805 static unsigned hsw_wrpll_get_budget_for_freq(int clock)
806 {
807 	switch (clock) {
808 	case 25175000:
809 	case 25200000:
810 	case 27000000:
811 	case 27027000:
812 	case 37762500:
813 	case 37800000:
814 	case 40500000:
815 	case 40541000:
816 	case 54000000:
817 	case 54054000:
818 	case 59341000:
819 	case 59400000:
820 	case 72000000:
821 	case 74176000:
822 	case 74250000:
823 	case 81000000:
824 	case 81081000:
825 	case 89012000:
826 	case 89100000:
827 	case 108000000:
828 	case 108108000:
829 	case 111264000:
830 	case 111375000:
831 	case 148352000:
832 	case 148500000:
833 	case 162000000:
834 	case 162162000:
835 	case 222525000:
836 	case 222750000:
837 	case 296703000:
838 	case 297000000:
839 		return 0;
840 	case 233500000:
841 	case 245250000:
842 	case 247750000:
843 	case 253250000:
844 	case 298000000:
845 		return 1500;
846 	case 169128000:
847 	case 169500000:
848 	case 179500000:
849 	case 202000000:
850 		return 2000;
851 	case 256250000:
852 	case 262500000:
853 	case 270000000:
854 	case 272500000:
855 	case 273750000:
856 	case 280750000:
857 	case 281250000:
858 	case 286000000:
859 	case 291750000:
860 		return 4000;
861 	case 267250000:
862 	case 268500000:
863 		return 5000;
864 	default:
865 		return 1000;
866 	}
867 }
868 
869 static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
870 				 unsigned int r2, unsigned int n2,
871 				 unsigned int p,
872 				 struct hsw_wrpll_rnp *best)
873 {
874 	u64 a, b, c, d, diff, diff_best;
875 
876 	/* No best (r,n,p) yet */
877 	if (best->p == 0) {
878 		best->p = p;
879 		best->n2 = n2;
880 		best->r2 = r2;
881 		return;
882 	}
883 
884 	/*
885 	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
886 	 * freq2k.
887 	 *
888 	 * delta = 1e6 *
889 	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
890 	 *	   freq2k;
891 	 *
892 	 * and we would like delta <= budget.
893 	 *
894 	 * If the discrepancy is above the PPM-based budget, always prefer to
895 	 * improve upon the previous solution.  However, if you're within the
896 	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
897 	 */
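	/*
	 * To stay in integer math, the "delta <= budget" check is done in the
	 * cross-multiplied form below: with diff = |freq2k*p*r2 - LC_FREQ_2K*n2|,
	 * "a >= c" means the candidate (r2, n2, p) is within the budget, and
	 * "b >= d" means the current best is within the budget.
	 */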
898 	a = freq2k * budget * p * r2;
899 	b = freq2k * budget * best->p * best->r2;
900 	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
901 	diff_best = abs_diff(freq2k * best->p * best->r2,
902 			     LC_FREQ_2K * best->n2);
903 	c = 1000000 * diff;
904 	d = 1000000 * diff_best;
905 
906 	if (a < c && b < d) {
907 		/* If both are above the budget, pick the closer */
908 		if (best->p * best->r2 * diff < p * r2 * diff_best) {
909 			best->p = p;
910 			best->n2 = n2;
911 			best->r2 = r2;
912 		}
913 	} else if (a >= c && b < d) {
914 		/* Candidate deviation is within the budget but the current best's is not: update. */
915 		best->p = p;
916 		best->n2 = n2;
917 		best->r2 = r2;
918 	} else if (a >= c && b >= d) {
919 		/* Both are below the limit, so pick the higher n2/(r2*r2) */
920 		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
921 			best->p = p;
922 			best->n2 = n2;
923 			best->r2 = r2;
924 		}
925 	}
926 	/* Otherwise a < c && b >= d, do nothing */
927 }
928 
929 static void
930 hsw_ddi_calculate_wrpll(int clock /* in Hz */,
931 			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
932 {
933 	u64 freq2k;
934 	unsigned p, n2, r2;
935 	struct hsw_wrpll_rnp best = {};
936 	unsigned budget;
937 
938 	freq2k = clock / 100;
939 
940 	budget = hsw_wrpll_get_budget_for_freq(clock);
941 
942 	/* Special case handling for the 540 MHz pixel clock: bypass the WR PLL
943 	 * entirely and pass the LC PLL frequency straight through instead. */
944 	if (freq2k == 5400000) {
945 		*n2_out = 2;
946 		*p_out = 1;
947 		*r2_out = 2;
948 		return;
949 	}
950 
951 	/*
952 	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
953 	 * the WR PLL.
954 	 *
955 	 * We want R so that REF_MIN <= Ref <= REF_MAX.
956 	 * Injecting R2 = 2 * R gives:
957 	 *   REF_MAX * r2 > LC_FREQ * 2 and
958 	 *   REF_MIN * r2 < LC_FREQ * 2
959 	 *
960 	 * Which means the desired boundaries for r2 are:
961 	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
962 	 *
963 	 */
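	/*
	 * With the LC_FREQ/REF_MIN/REF_MAX values defined above this works
	 * out to r2 running from 2 * 2700 / 400 + 1 = 14 up to
	 * 2 * 2700 / 48 = 112.
	 */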
964 	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
965 	     r2 <= LC_FREQ * 2 / REF_MIN;
966 	     r2++) {
967 
968 		/*
969 		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
970 		 *
971 		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
972 		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
973 		 *   VCO_MAX * r2 > n2 * LC_FREQ and
974 		 *   VCO_MIN * r2 < n2 * LC_FREQ)
975 		 *
976 		 * Which means the desired boundaries for n2 are:
977 		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
978 		 */
979 		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
980 		     n2 <= VCO_MAX * r2 / LC_FREQ;
981 		     n2++) {
982 
983 			for (p = P_MIN; p <= P_MAX; p += P_INC)
984 				hsw_wrpll_update_rnp(freq2k, budget,
985 						     r2, n2, p, &best);
986 		}
987 	}
988 
989 	*n2_out = best.n2;
990 	*p_out = best.p;
991 	*r2_out = best.r2;
992 }
993 
994 static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *i915,
995 				  const struct intel_shared_dpll *pll,
996 				  const struct intel_dpll_hw_state *dpll_hw_state)
997 {
998 	const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
999 	int refclk;
1000 	int n, p, r;
1001 	u32 wrpll = hw_state->wrpll;
1002 
1003 	switch (wrpll & WRPLL_REF_MASK) {
1004 	case WRPLL_REF_SPECIAL_HSW:
1005 		/* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
1006 		if (IS_HASWELL(i915) && !IS_HASWELL_ULT(i915)) {
1007 			refclk = i915->display.dpll.ref_clks.nssc;
1008 			break;
1009 		}
1010 		fallthrough;
1011 	case WRPLL_REF_PCH_SSC:
1012 		/*
1013 		 * We could calculate spread here, but our checking
1014 		 * code only cares about 5% accuracy, and spread is a max of
1015 		 * 0.5% downspread.
1016 		 */
1017 		refclk = i915->display.dpll.ref_clks.ssc;
1018 		break;
1019 	case WRPLL_REF_LCPLL:
1020 		refclk = 2700000;
1021 		break;
1022 	default:
1023 		MISSING_CASE(wrpll);
1024 		return 0;
1025 	}
1026 
1027 	r = wrpll & WRPLL_DIVIDER_REF_MASK;
1028 	p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
1029 	n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;
1030 
1031 	/* Convert to KHz, p & r have a fixed point portion */
1032 	return (refclk * n / 10) / (p * r) * 2;
1033 }
1034 
1035 static int
1036 hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state,
1037 			   struct intel_crtc *crtc)
1038 {
1039 	struct drm_i915_private *i915 = to_i915(state->base.dev);
1040 	struct intel_crtc_state *crtc_state =
1041 		intel_atomic_get_new_crtc_state(state, crtc);
1042 	struct hsw_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.hsw;
1043 	unsigned int p, n2, r2;
1044 
1045 	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
1046 
1047 	hw_state->wrpll =
1048 		WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
1049 		WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
1050 		WRPLL_DIVIDER_POST(p);
1051 
1052 	crtc_state->port_clock = hsw_ddi_wrpll_get_freq(i915, NULL,
1053 							&crtc_state->dpll_hw_state);
1054 
1055 	return 0;
1056 }
1057 
1058 static struct intel_shared_dpll *
1059 hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
1060 		       struct intel_crtc *crtc)
1061 {
1062 	struct intel_crtc_state *crtc_state =
1063 		intel_atomic_get_new_crtc_state(state, crtc);
1064 
1065 	return intel_find_shared_dpll(state, crtc,
1066 				      &crtc_state->dpll_hw_state,
1067 				      BIT(DPLL_ID_WRPLL2) |
1068 				      BIT(DPLL_ID_WRPLL1));
1069 }
1070 
1071 static int
1072 hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state)
1073 {
1074 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1075 	int clock = crtc_state->port_clock;
1076 
1077 	switch (clock / 2) {
1078 	case 81000:
1079 	case 135000:
1080 	case 270000:
1081 		return 0;
1082 	default:
1083 		drm_dbg_kms(&i915->drm, "Invalid clock for DP: %d\n",
1084 			    clock);
1085 		return -EINVAL;
1086 	}
1087 }
1088 
1089 static struct intel_shared_dpll *
1090 hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
1091 {
1092 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1093 	struct intel_shared_dpll *pll;
1094 	enum intel_dpll_id pll_id;
1095 	int clock = crtc_state->port_clock;
1096 
1097 	switch (clock / 2) {
1098 	case 81000:
1099 		pll_id = DPLL_ID_LCPLL_810;
1100 		break;
1101 	case 135000:
1102 		pll_id = DPLL_ID_LCPLL_1350;
1103 		break;
1104 	case 270000:
1105 		pll_id = DPLL_ID_LCPLL_2700;
1106 		break;
1107 	default:
1108 		MISSING_CASE(clock / 2);
1109 		return NULL;
1110 	}
1111 
1112 	pll = intel_get_shared_dpll_by_id(i915, pll_id);
1113 
1114 	if (!pll)
1115 		return NULL;
1116 
1117 	return pll;
1118 }
1119 
1120 static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1121 				  const struct intel_shared_dpll *pll,
1122 				  const struct intel_dpll_hw_state *dpll_hw_state)
1123 {
1124 	int link_clock = 0;
1125 
1126 	switch (pll->info->id) {
1127 	case DPLL_ID_LCPLL_810:
1128 		link_clock = 81000;
1129 		break;
1130 	case DPLL_ID_LCPLL_1350:
1131 		link_clock = 135000;
1132 		break;
1133 	case DPLL_ID_LCPLL_2700:
1134 		link_clock = 270000;
1135 		break;
1136 	default:
1137 		drm_WARN(&i915->drm, 1, "bad port clock sel\n");
1138 		break;
1139 	}
1140 
1141 	return link_clock * 2;
1142 }
1143 
1144 static int
1145 hsw_ddi_spll_compute_dpll(struct intel_atomic_state *state,
1146 			  struct intel_crtc *crtc)
1147 {
1148 	struct intel_crtc_state *crtc_state =
1149 		intel_atomic_get_new_crtc_state(state, crtc);
1150 	struct hsw_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.hsw;
1151 
1152 	if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
1153 		return -EINVAL;
1154 
1155 	hw_state->spll =
1156 		SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
1157 
1158 	return 0;
1159 }
1160 
1161 static struct intel_shared_dpll *
1162 hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
1163 		      struct intel_crtc *crtc)
1164 {
1165 	struct intel_crtc_state *crtc_state =
1166 		intel_atomic_get_new_crtc_state(state, crtc);
1167 
1168 	return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
1169 				      BIT(DPLL_ID_SPLL));
1170 }
1171 
1172 static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
1173 				 const struct intel_shared_dpll *pll,
1174 				 const struct intel_dpll_hw_state *dpll_hw_state)
1175 {
1176 	const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
1177 	int link_clock = 0;
1178 
1179 	switch (hw_state->spll & SPLL_FREQ_MASK) {
1180 	case SPLL_FREQ_810MHz:
1181 		link_clock = 81000;
1182 		break;
1183 	case SPLL_FREQ_1350MHz:
1184 		link_clock = 135000;
1185 		break;
1186 	case SPLL_FREQ_2700MHz:
1187 		link_clock = 270000;
1188 		break;
1189 	default:
1190 		drm_WARN(&i915->drm, 1, "bad spll freq\n");
1191 		break;
1192 	}
1193 
1194 	return link_clock * 2;
1195 }
1196 
1197 static int hsw_compute_dpll(struct intel_atomic_state *state,
1198 			    struct intel_crtc *crtc,
1199 			    struct intel_encoder *encoder)
1200 {
1201 	struct intel_crtc_state *crtc_state =
1202 		intel_atomic_get_new_crtc_state(state, crtc);
1203 
1204 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1205 		return hsw_ddi_wrpll_compute_dpll(state, crtc);
1206 	else if (intel_crtc_has_dp_encoder(crtc_state))
1207 		return hsw_ddi_lcpll_compute_dpll(crtc_state);
1208 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1209 		return hsw_ddi_spll_compute_dpll(state, crtc);
1210 	else
1211 		return -EINVAL;
1212 }
1213 
1214 static int hsw_get_dpll(struct intel_atomic_state *state,
1215 			struct intel_crtc *crtc,
1216 			struct intel_encoder *encoder)
1217 {
1218 	struct intel_crtc_state *crtc_state =
1219 		intel_atomic_get_new_crtc_state(state, crtc);
1220 	struct intel_shared_dpll *pll = NULL;
1221 
1222 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1223 		pll = hsw_ddi_wrpll_get_dpll(state, crtc);
1224 	else if (intel_crtc_has_dp_encoder(crtc_state))
1225 		pll = hsw_ddi_lcpll_get_dpll(crtc_state);
1226 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1227 		pll = hsw_ddi_spll_get_dpll(state, crtc);
1228 
1229 	if (!pll)
1230 		return -EINVAL;
1231 
1232 	intel_reference_shared_dpll(state, crtc,
1233 				    pll, &crtc_state->dpll_hw_state);
1234 
1235 	crtc_state->shared_dpll = pll;
1236 
1237 	return 0;
1238 }
1239 
1240 static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
1241 {
1242 	i915->display.dpll.ref_clks.ssc = 135000;
1243 	/* Non-SSC is only used on non-ULT HSW. */
1244 	if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
1245 		i915->display.dpll.ref_clks.nssc = 24000;
1246 	else
1247 		i915->display.dpll.ref_clks.nssc = 135000;
1248 }
1249 
1250 static void hsw_dump_hw_state(struct drm_printer *p,
1251 			      const struct intel_dpll_hw_state *dpll_hw_state)
1252 {
1253 	const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
1254 
1255 	drm_printf(p, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
1256 		   hw_state->wrpll, hw_state->spll);
1257 }
1258 
1259 static bool hsw_compare_hw_state(const struct intel_dpll_hw_state *_a,
1260 				 const struct intel_dpll_hw_state *_b)
1261 {
1262 	const struct hsw_dpll_hw_state *a = &_a->hsw;
1263 	const struct hsw_dpll_hw_state *b = &_b->hsw;
1264 
1265 	return a->wrpll == b->wrpll &&
1266 		a->spll == b->spll;
1267 }
1268 
1269 static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
1270 	.enable = hsw_ddi_wrpll_enable,
1271 	.disable = hsw_ddi_wrpll_disable,
1272 	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
1273 	.get_freq = hsw_ddi_wrpll_get_freq,
1274 };
1275 
1276 static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
1277 	.enable = hsw_ddi_spll_enable,
1278 	.disable = hsw_ddi_spll_disable,
1279 	.get_hw_state = hsw_ddi_spll_get_hw_state,
1280 	.get_freq = hsw_ddi_spll_get_freq,
1281 };
1282 
1283 static void hsw_ddi_lcpll_enable(struct drm_i915_private *i915,
1284 				 struct intel_shared_dpll *pll,
1285 				 const struct intel_dpll_hw_state *hw_state)
1286 {
1287 }
1288 
1289 static void hsw_ddi_lcpll_disable(struct drm_i915_private *i915,
1290 				  struct intel_shared_dpll *pll)
1291 {
1292 }
1293 
1294 static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *i915,
1295 				       struct intel_shared_dpll *pll,
1296 				       struct intel_dpll_hw_state *dpll_hw_state)
1297 {
1298 	return true;
1299 }
1300 
1301 static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
1302 	.enable = hsw_ddi_lcpll_enable,
1303 	.disable = hsw_ddi_lcpll_disable,
1304 	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
1305 	.get_freq = hsw_ddi_lcpll_get_freq,
1306 };
1307 
1308 static const struct dpll_info hsw_plls[] = {
1309 	{ .name = "WRPLL 1", .funcs = &hsw_ddi_wrpll_funcs, .id = DPLL_ID_WRPLL1, },
1310 	{ .name = "WRPLL 2", .funcs = &hsw_ddi_wrpll_funcs, .id = DPLL_ID_WRPLL2, },
1311 	{ .name = "SPLL", .funcs = &hsw_ddi_spll_funcs, .id = DPLL_ID_SPLL, },
1312 	{ .name = "LCPLL 810", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_810,
1313 	  .always_on = true, },
1314 	{ .name = "LCPLL 1350", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_1350,
1315 	  .always_on = true, },
1316 	{ .name = "LCPLL 2700", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_2700,
1317 	  .always_on = true, },
1318 	{}
1319 };
1320 
1321 static const struct intel_dpll_mgr hsw_pll_mgr = {
1322 	.dpll_info = hsw_plls,
1323 	.compute_dplls = hsw_compute_dpll,
1324 	.get_dplls = hsw_get_dpll,
1325 	.put_dplls = intel_put_dpll,
1326 	.update_ref_clks = hsw_update_dpll_ref_clks,
1327 	.dump_hw_state = hsw_dump_hw_state,
1328 	.compare_hw_state = hsw_compare_hw_state,
1329 };
1330 
1331 struct skl_dpll_regs {
1332 	i915_reg_t ctl, cfgcr1, cfgcr2;
1333 };
1334 
1335 /* this array is indexed by the *shared* pll id */
1336 static const struct skl_dpll_regs skl_dpll_regs[4] = {
1337 	{
1338 		/* DPLL 0 */
1339 		.ctl = LCPLL1_CTL,
1340 		/* DPLL 0 doesn't support HDMI mode */
1341 	},
1342 	{
1343 		/* DPLL 1 */
1344 		.ctl = LCPLL2_CTL,
1345 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
1346 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
1347 	},
1348 	{
1349 		/* DPLL 2 */
1350 		.ctl = WRPLL_CTL(0),
1351 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
1352 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
1353 	},
1354 	{
1355 		/* DPLL 3 */
1356 		.ctl = WRPLL_CTL(1),
1357 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
1358 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
1359 	},
1360 };
1361 
1362 static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *i915,
1363 				    struct intel_shared_dpll *pll,
1364 				    const struct skl_dpll_hw_state *hw_state)
1365 {
1366 	const enum intel_dpll_id id = pll->info->id;
1367 
1368 	intel_de_rmw(i915, DPLL_CTRL1,
1369 		     DPLL_CTRL1_HDMI_MODE(id) |
1370 		     DPLL_CTRL1_SSC(id) |
1371 		     DPLL_CTRL1_LINK_RATE_MASK(id),
1372 		     hw_state->ctrl1 << (id * 6));
1373 	intel_de_posting_read(i915, DPLL_CTRL1);
1374 }
1375 
1376 static void skl_ddi_pll_enable(struct drm_i915_private *i915,
1377 			       struct intel_shared_dpll *pll,
1378 			       const struct intel_dpll_hw_state *dpll_hw_state)
1379 {
1380 	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1381 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1382 	const enum intel_dpll_id id = pll->info->id;
1383 
1384 	skl_ddi_pll_write_ctrl1(i915, pll, hw_state);
1385 
1386 	intel_de_write(i915, regs[id].cfgcr1, hw_state->cfgcr1);
1387 	intel_de_write(i915, regs[id].cfgcr2, hw_state->cfgcr2);
1388 	intel_de_posting_read(i915, regs[id].cfgcr1);
1389 	intel_de_posting_read(i915, regs[id].cfgcr2);
1390 
1391 	/* the enable bit is always bit 31 */
1392 	intel_de_rmw(i915, regs[id].ctl, 0, LCPLL_PLL_ENABLE);
1393 
1394 	if (intel_de_wait_for_set(i915, DPLL_STATUS, DPLL_LOCK(id), 5))
1395 		drm_err(&i915->drm, "DPLL %d not locked\n", id);
1396 }
1397 
1398 static void skl_ddi_dpll0_enable(struct drm_i915_private *i915,
1399 				 struct intel_shared_dpll *pll,
1400 				 const struct intel_dpll_hw_state *dpll_hw_state)
1401 {
1402 	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1403 
1404 	skl_ddi_pll_write_ctrl1(i915, pll, hw_state);
1405 }
1406 
1407 static void skl_ddi_pll_disable(struct drm_i915_private *i915,
1408 				struct intel_shared_dpll *pll)
1409 {
1410 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1411 	const enum intel_dpll_id id = pll->info->id;
1412 
1413 	/* the enable bit is always bit 31 */
1414 	intel_de_rmw(i915, regs[id].ctl, LCPLL_PLL_ENABLE, 0);
1415 	intel_de_posting_read(i915, regs[id].ctl);
1416 }
1417 
1418 static void skl_ddi_dpll0_disable(struct drm_i915_private *i915,
1419 				  struct intel_shared_dpll *pll)
1420 {
1421 }
1422 
1423 static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *i915,
1424 				     struct intel_shared_dpll *pll,
1425 				     struct intel_dpll_hw_state *dpll_hw_state)
1426 {
1427 	struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1428 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1429 	const enum intel_dpll_id id = pll->info->id;
1430 	intel_wakeref_t wakeref;
1431 	bool ret;
1432 	u32 val;
1433 
1434 	wakeref = intel_display_power_get_if_enabled(i915,
1435 						     POWER_DOMAIN_DISPLAY_CORE);
1436 	if (!wakeref)
1437 		return false;
1438 
1439 	ret = false;
1440 
1441 	val = intel_de_read(i915, regs[id].ctl);
1442 	if (!(val & LCPLL_PLL_ENABLE))
1443 		goto out;
1444 
1445 	val = intel_de_read(i915, DPLL_CTRL1);
1446 	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1447 
1448 	/* avoid reading back stale values if HDMI mode is not enabled */
1449 	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
1450 		hw_state->cfgcr1 = intel_de_read(i915, regs[id].cfgcr1);
1451 		hw_state->cfgcr2 = intel_de_read(i915, regs[id].cfgcr2);
1452 	}
1453 	ret = true;
1454 
1455 out:
1456 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1457 
1458 	return ret;
1459 }
1460 
1461 static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *i915,
1462 				       struct intel_shared_dpll *pll,
1463 				       struct intel_dpll_hw_state *dpll_hw_state)
1464 {
1465 	struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1466 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1467 	const enum intel_dpll_id id = pll->info->id;
1468 	intel_wakeref_t wakeref;
1469 	u32 val;
1470 	bool ret;
1471 
1472 	wakeref = intel_display_power_get_if_enabled(i915,
1473 						     POWER_DOMAIN_DISPLAY_CORE);
1474 	if (!wakeref)
1475 		return false;
1476 
1477 	ret = false;
1478 
1479 	/* DPLL0 is always enabled since it drives CDCLK */
1480 	val = intel_de_read(i915, regs[id].ctl);
1481 	if (drm_WARN_ON(&i915->drm, !(val & LCPLL_PLL_ENABLE)))
1482 		goto out;
1483 
1484 	val = intel_de_read(i915, DPLL_CTRL1);
1485 	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1486 
1487 	ret = true;
1488 
1489 out:
1490 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1491 
1492 	return ret;
1493 }
1494 
1495 struct skl_wrpll_context {
1496 	u64 min_deviation;		/* current minimal deviation */
1497 	u64 central_freq;		/* chosen central freq */
1498 	u64 dco_freq;			/* chosen dco freq */
1499 	unsigned int p;			/* chosen divider */
1500 };
1501 
1502 /* DCO freq must be within +1%/-6%  of the DCO central freq */
1503 #define SKL_DCO_MAX_PDEVIATION	100
1504 #define SKL_DCO_MAX_NDEVIATION	600
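/* both limits are in units of 0.01%, matching the 10000 scale used below */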
1505 
1506 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1507 				  u64 central_freq,
1508 				  u64 dco_freq,
1509 				  unsigned int divider)
1510 {
1511 	u64 deviation;
1512 
1513 	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1514 			      central_freq);
1515 
1516 	/* positive deviation */
1517 	if (dco_freq >= central_freq) {
1518 		if (deviation < SKL_DCO_MAX_PDEVIATION &&
1519 		    deviation < ctx->min_deviation) {
1520 			ctx->min_deviation = deviation;
1521 			ctx->central_freq = central_freq;
1522 			ctx->dco_freq = dco_freq;
1523 			ctx->p = divider;
1524 		}
1525 	/* negative deviation */
1526 	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1527 		   deviation < ctx->min_deviation) {
1528 		ctx->min_deviation = deviation;
1529 		ctx->central_freq = central_freq;
1530 		ctx->dco_freq = dco_freq;
1531 		ctx->p = divider;
1532 	}
1533 }
1534 
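/*
 * Split the overall divider p into the three multiplier stages p0, p1 and
 * p2 such that p0 * p1 * p2 == p.  For example, p = 20 gives half = 10,
 * which is even, so p0 = 2, p1 = 5, p2 = 2.
 */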
1535 static void skl_wrpll_get_multipliers(unsigned int p,
1536 				      unsigned int *p0 /* out */,
1537 				      unsigned int *p1 /* out */,
1538 				      unsigned int *p2 /* out */)
1539 {
1540 	/* even dividers */
1541 	if (p % 2 == 0) {
1542 		unsigned int half = p / 2;
1543 
1544 		if (half == 1 || half == 2 || half == 3 || half == 5) {
1545 			*p0 = 2;
1546 			*p1 = 1;
1547 			*p2 = half;
1548 		} else if (half % 2 == 0) {
1549 			*p0 = 2;
1550 			*p1 = half / 2;
1551 			*p2 = 2;
1552 		} else if (half % 3 == 0) {
1553 			*p0 = 3;
1554 			*p1 = half / 3;
1555 			*p2 = 2;
1556 		} else if (half % 7 == 0) {
1557 			*p0 = 7;
1558 			*p1 = half / 7;
1559 			*p2 = 2;
1560 		}
1561 	} else if (p == 3 || p == 9) {  /* 3, 5, 7, 9, 15, 21, 35 */
1562 		*p0 = 3;
1563 		*p1 = 1;
1564 		*p2 = p / 3;
1565 	} else if (p == 5 || p == 7) {
1566 		*p0 = p;
1567 		*p1 = 1;
1568 		*p2 = 1;
1569 	} else if (p == 15) {
1570 		*p0 = 3;
1571 		*p1 = 1;
1572 		*p2 = 5;
1573 	} else if (p == 21) {
1574 		*p0 = 7;
1575 		*p1 = 1;
1576 		*p2 = 3;
1577 	} else if (p == 35) {
1578 		*p0 = 7;
1579 		*p1 = 1;
1580 		*p2 = 5;
1581 	}
1582 }
1583 
1584 struct skl_wrpll_params {
1585 	u32 dco_fraction;
1586 	u32 dco_integer;
1587 	u32 qdiv_ratio;
1588 	u32 qdiv_mode;
1589 	u32 kdiv;
1590 	u32 pdiv;
1591 	u32 central_freq;
1592 };
1593 
1594 static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
1595 				      u64 afe_clock,
1596 				      int ref_clock,
1597 				      u64 central_freq,
1598 				      u32 p0, u32 p1, u32 p2)
1599 {
1600 	u64 dco_freq;
1601 
1602 	switch (central_freq) {
1603 	case 9600000000ULL:
1604 		params->central_freq = 0;
1605 		break;
1606 	case 9000000000ULL:
1607 		params->central_freq = 1;
1608 		break;
1609 	case 8400000000ULL:
1610 		params->central_freq = 3;
1611 	}
1612 
1613 	switch (p0) {
1614 	case 1:
1615 		params->pdiv = 0;
1616 		break;
1617 	case 2:
1618 		params->pdiv = 1;
1619 		break;
1620 	case 3:
1621 		params->pdiv = 2;
1622 		break;
1623 	case 7:
1624 		params->pdiv = 4;
1625 		break;
1626 	default:
1627 		WARN(1, "Incorrect PDiv\n");
1628 	}
1629 
1630 	switch (p2) {
1631 	case 5:
1632 		params->kdiv = 0;
1633 		break;
1634 	case 2:
1635 		params->kdiv = 1;
1636 		break;
1637 	case 3:
1638 		params->kdiv = 2;
1639 		break;
1640 	case 1:
1641 		params->kdiv = 3;
1642 		break;
1643 	default:
1644 		WARN(1, "Incorrect KDiv\n");
1645 	}
1646 
1647 	params->qdiv_ratio = p1;
1648 	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
1649 
1650 	dco_freq = p0 * p1 * p2 * afe_clock;
1651 
1652 	/*
1653 	 * Intermediate values are in Hz.
1654 	 * Divide by MHz to match bspec
1655 	 */
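	/*
	 * Worked example (illustrative values, not from bspec): with a
	 * 148500 kHz port clock, afe_clock = 742.5 MHz and divider p = 12
	 * give dco_freq = 8.91 GHz.  With a 24000 kHz reference this yields
	 * dco_integer = 8910000000 / 24000000 = 371 and
	 * dco_fraction = (371250000 - 371000000) * 0x8000 / 1000000 = 0x2000.
	 */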
1656 	params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
1657 	params->dco_fraction =
1658 		div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
1659 			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
1660 }
1661 
1662 static int
1663 skl_ddi_calculate_wrpll(int clock,
1664 			int ref_clock,
1665 			struct skl_wrpll_params *wrpll_params)
1666 {
1667 	static const u64 dco_central_freq[3] = { 8400000000ULL,
1668 						 9000000000ULL,
1669 						 9600000000ULL };
1670 	static const u8 even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
1671 					    24, 28, 30, 32, 36, 40, 42, 44,
1672 					    48, 52, 54, 56, 60, 64, 66, 68,
1673 					    70, 72, 76, 78, 80, 84, 88, 90,
1674 					    92, 96, 98 };
1675 	static const u8 odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
1676 	static const struct {
1677 		const u8 *list;
1678 		int n_dividers;
1679 	} dividers[] = {
1680 		{ even_dividers, ARRAY_SIZE(even_dividers) },
1681 		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
1682 	};
1683 	struct skl_wrpll_context ctx = {
1684 		.min_deviation = U64_MAX,
1685 	};
1686 	unsigned int dco, d, i;
1687 	unsigned int p0, p1, p2;
1688 	u64 afe_clock = (u64)clock * 1000 * 5; /* AFE Clock is 5x Pixel clock, in Hz */
1689 
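	/*
	 * Illustrative walk-through: a 148500 kHz pixel clock gives
	 * afe_clock = 742.5 MHz; divider 12 yields dco_freq = 8.91 GHz,
	 * which sits within -6% of the 9 GHz central frequency and is
	 * therefore a valid even-divider candidate.
	 */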
1690 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
1691 		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
1692 			for (i = 0; i < dividers[d].n_dividers; i++) {
1693 				unsigned int p = dividers[d].list[i];
1694 				u64 dco_freq = p * afe_clock;
1695 
1696 				skl_wrpll_try_divider(&ctx,
1697 						      dco_central_freq[dco],
1698 						      dco_freq,
1699 						      p);
1700 				/*
1701 				 * Skip the remaining dividers if we're sure we've
1702 				 * found the definitive divider; we can't improve
1703 				 * on a 0 deviation.
1704 				 */
1705 				if (ctx.min_deviation == 0)
1706 					goto skip_remaining_dividers;
1707 			}
1708 		}
1709 
1710 skip_remaining_dividers:
1711 		/*
1712 		 * If a solution is found with an even divider, prefer
1713 		 * this one.
1714 		 */
1715 		if (d == 0 && ctx.p)
1716 			break;
1717 	}
1718 
1719 	if (!ctx.p)
1720 		return -EINVAL;
1721 
1722 	/*
1723 	 * gcc incorrectly analyses that these can be used without being
1724 	 * initialized. To be fair, it's hard to guess.
1725 	 */
1726 	p0 = p1 = p2 = 0;
1727 	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
1728 	skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
1729 				  ctx.central_freq, p0, p1, p2);
1730 
1731 	return 0;
1732 }
1733 
1734 static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
1735 				  const struct intel_shared_dpll *pll,
1736 				  const struct intel_dpll_hw_state *dpll_hw_state)
1737 {
1738 	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1739 	int ref_clock = i915->display.dpll.ref_clks.nssc;
1740 	u32 p0, p1, p2, dco_freq;
1741 
1742 	p0 = hw_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
1743 	p2 = hw_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;
1744 
1745 	if (hw_state->cfgcr2 &  DPLL_CFGCR2_QDIV_MODE(1))
1746 		p1 = (hw_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
1747 	else
1748 		p1 = 1;
1749 
1750 
1751 	switch (p0) {
1752 	case DPLL_CFGCR2_PDIV_1:
1753 		p0 = 1;
1754 		break;
1755 	case DPLL_CFGCR2_PDIV_2:
1756 		p0 = 2;
1757 		break;
1758 	case DPLL_CFGCR2_PDIV_3:
1759 		p0 = 3;
1760 		break;
1761 	case DPLL_CFGCR2_PDIV_7_INVALID:
1762 		/*
1763 		 * Incorrect ASUS-Z170M BIOS setting, the HW seems to ignore bit#0,
1764 		 * handling it the same way as PDIV_7.
1765 		 */
1766 		drm_dbg_kms(&i915->drm, "Invalid WRPLL PDIV divider value, fixing it.\n");
1767 		fallthrough;
1768 	case DPLL_CFGCR2_PDIV_7:
1769 		p0 = 7;
1770 		break;
1771 	default:
1772 		MISSING_CASE(p0);
1773 		return 0;
1774 	}
1775 
1776 	switch (p2) {
1777 	case DPLL_CFGCR2_KDIV_5:
1778 		p2 = 5;
1779 		break;
1780 	case DPLL_CFGCR2_KDIV_2:
1781 		p2 = 2;
1782 		break;
1783 	case DPLL_CFGCR2_KDIV_3:
1784 		p2 = 3;
1785 		break;
1786 	case DPLL_CFGCR2_KDIV_1:
1787 		p2 = 1;
1788 		break;
1789 	default:
1790 		MISSING_CASE(p2);
1791 		return 0;
1792 	}
1793 
1794 	dco_freq = (hw_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
1795 		   ref_clock;
1796 
1797 	dco_freq += ((hw_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
1798 		    ref_clock / 0x8000;
1799 
1800 	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
1801 		return 0;
1802 
1803 	return dco_freq / (p0 * p1 * p2 * 5);
1804 }
1805 
1806 static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
1807 {
1808 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1809 	struct skl_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.skl;
1810 	struct skl_wrpll_params wrpll_params = {};
1811 	int ret;
1812 
1813 	ret = skl_ddi_calculate_wrpll(crtc_state->port_clock,
1814 				      i915->display.dpll.ref_clks.nssc, &wrpll_params);
1815 	if (ret)
1816 		return ret;
1817 
1818 	/*
1819 	 * See comment in intel_dpll_hw_state to understand why we always use 0
1820 	 * as the DPLL id in this function.
1821 	 */
1822 	hw_state->ctrl1 =
1823 		DPLL_CTRL1_OVERRIDE(0) |
1824 		DPLL_CTRL1_HDMI_MODE(0);
1825 
1826 	hw_state->cfgcr1 =
1827 		DPLL_CFGCR1_FREQ_ENABLE |
1828 		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
1829 		wrpll_params.dco_integer;
1830 
1831 	hw_state->cfgcr2 =
1832 		DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
1833 		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
1834 		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
1835 		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
1836 		wrpll_params.central_freq;
1837 
1838 	crtc_state->port_clock = skl_ddi_wrpll_get_freq(i915, NULL,
1839 							&crtc_state->dpll_hw_state);
1840 
1841 	return 0;
1842 }
1843 
1844 static int
1845 skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1846 {
1847 	struct skl_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.skl;
1848 	u32 ctrl1;
1849 
1850 	/*
1851 	 * See comment in intel_dpll_hw_state to understand why we always use 0
1852 	 * as the DPLL id in this function.
1853 	 */
1854 	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1855 	switch (crtc_state->port_clock / 2) {
1856 	case 81000:
1857 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
1858 		break;
1859 	case 135000:
1860 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
1861 		break;
1862 	case 270000:
1863 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
1864 		break;
1865 		/* eDP 1.4 rates */
1866 	case 162000:
1867 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
1868 		break;
1869 	case 108000:
1870 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
1871 		break;
1872 	case 216000:
1873 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
1874 		break;
1875 	}
1876 
1877 	hw_state->ctrl1 = ctrl1;
1878 
1879 	return 0;
1880 }
1881 
1882 static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1883 				  const struct intel_shared_dpll *pll,
1884 				  const struct intel_dpll_hw_state *dpll_hw_state)
1885 {
1886 	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1887 	int link_clock = 0;
1888 
1889 	switch ((hw_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
1890 		DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
1891 	case DPLL_CTRL1_LINK_RATE_810:
1892 		link_clock = 81000;
1893 		break;
1894 	case DPLL_CTRL1_LINK_RATE_1080:
1895 		link_clock = 108000;
1896 		break;
1897 	case DPLL_CTRL1_LINK_RATE_1350:
1898 		link_clock = 135000;
1899 		break;
1900 	case DPLL_CTRL1_LINK_RATE_1620:
1901 		link_clock = 162000;
1902 		break;
1903 	case DPLL_CTRL1_LINK_RATE_2160:
1904 		link_clock = 216000;
1905 		break;
1906 	case DPLL_CTRL1_LINK_RATE_2700:
1907 		link_clock = 270000;
1908 		break;
1909 	default:
1910 		drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
1911 		break;
1912 	}
1913 
1914 	return link_clock * 2;
1915 }
1916 
1917 static int skl_compute_dpll(struct intel_atomic_state *state,
1918 			    struct intel_crtc *crtc,
1919 			    struct intel_encoder *encoder)
1920 {
1921 	struct intel_crtc_state *crtc_state =
1922 		intel_atomic_get_new_crtc_state(state, crtc);
1923 
1924 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1925 		return skl_ddi_hdmi_pll_dividers(crtc_state);
1926 	else if (intel_crtc_has_dp_encoder(crtc_state))
1927 		return skl_ddi_dp_set_dpll_hw_state(crtc_state);
1928 	else
1929 		return -EINVAL;
1930 }
1931 
1932 static int skl_get_dpll(struct intel_atomic_state *state,
1933 			struct intel_crtc *crtc,
1934 			struct intel_encoder *encoder)
1935 {
1936 	struct intel_crtc_state *crtc_state =
1937 		intel_atomic_get_new_crtc_state(state, crtc);
1938 	struct intel_shared_dpll *pll;
1939 
1940 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1941 		pll = intel_find_shared_dpll(state, crtc,
1942 					     &crtc_state->dpll_hw_state,
1943 					     BIT(DPLL_ID_SKL_DPLL0));
1944 	else
1945 		pll = intel_find_shared_dpll(state, crtc,
1946 					     &crtc_state->dpll_hw_state,
1947 					     BIT(DPLL_ID_SKL_DPLL3) |
1948 					     BIT(DPLL_ID_SKL_DPLL2) |
1949 					     BIT(DPLL_ID_SKL_DPLL1));
1950 	if (!pll)
1951 		return -EINVAL;
1952 
1953 	intel_reference_shared_dpll(state, crtc,
1954 				    pll, &crtc_state->dpll_hw_state);
1955 
1956 	crtc_state->shared_dpll = pll;
1957 
1958 	return 0;
1959 }
1960 
1961 static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
1962 				const struct intel_shared_dpll *pll,
1963 				const struct intel_dpll_hw_state *dpll_hw_state)
1964 {
1965 	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1966 
1967 	/*
1968 	 * ctrl1 register is already shifted for each pll, just use 0 to get
1969 	 * the internal shift for each field
1970 	 */
1971 	if (hw_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
1972 		return skl_ddi_wrpll_get_freq(i915, pll, dpll_hw_state);
1973 	else
1974 		return skl_ddi_lcpll_get_freq(i915, pll, dpll_hw_state);
1975 }
1976 
1977 static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
1978 {
1979 	/* No SSC ref */
1980 	i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
1981 }
1982 
1983 static void skl_dump_hw_state(struct drm_printer *p,
1984 			      const struct intel_dpll_hw_state *dpll_hw_state)
1985 {
1986 	const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1987 
1988 	drm_printf(p, "dpll_hw_state: ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
1989 		   hw_state->ctrl1, hw_state->cfgcr1, hw_state->cfgcr2);
1990 }
1991 
1992 static bool skl_compare_hw_state(const struct intel_dpll_hw_state *_a,
1993 				 const struct intel_dpll_hw_state *_b)
1994 {
1995 	const struct skl_dpll_hw_state *a = &_a->skl;
1996 	const struct skl_dpll_hw_state *b = &_b->skl;
1997 
1998 	return a->ctrl1 == b->ctrl1 &&
1999 		a->cfgcr1 == b->cfgcr1 &&
2000 		a->cfgcr2 == b->cfgcr2;
2001 }
2002 
2003 static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
2004 	.enable = skl_ddi_pll_enable,
2005 	.disable = skl_ddi_pll_disable,
2006 	.get_hw_state = skl_ddi_pll_get_hw_state,
2007 	.get_freq = skl_ddi_pll_get_freq,
2008 };
2009 
2010 static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
2011 	.enable = skl_ddi_dpll0_enable,
2012 	.disable = skl_ddi_dpll0_disable,
2013 	.get_hw_state = skl_ddi_dpll0_get_hw_state,
2014 	.get_freq = skl_ddi_pll_get_freq,
2015 };
2016 
2017 static const struct dpll_info skl_plls[] = {
2018 	{ .name = "DPLL 0", .funcs = &skl_ddi_dpll0_funcs, .id = DPLL_ID_SKL_DPLL0,
2019 	  .always_on = true, },
2020 	{ .name = "DPLL 1", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL1, },
2021 	{ .name = "DPLL 2", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL2, },
2022 	{ .name = "DPLL 3", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL3, },
2023 	{}
2024 };
2025 
2026 static const struct intel_dpll_mgr skl_pll_mgr = {
2027 	.dpll_info = skl_plls,
2028 	.compute_dplls = skl_compute_dpll,
2029 	.get_dplls = skl_get_dpll,
2030 	.put_dplls = intel_put_dpll,
2031 	.update_ref_clks = skl_update_dpll_ref_clks,
2032 	.dump_hw_state = skl_dump_hw_state,
2033 	.compare_hw_state = skl_compare_hw_state,
2034 };
2035 
2036 static void bxt_ddi_pll_enable(struct drm_i915_private *i915,
2037 			       struct intel_shared_dpll *pll,
2038 			       const struct intel_dpll_hw_state *dpll_hw_state)
2039 {
2040 	struct intel_display *display = &i915->display;
2041 	const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
2042 	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
2043 	enum dpio_phy phy;
2044 	enum dpio_channel ch;
2045 	u32 temp;
2046 
2047 	bxt_port_to_phy_channel(display, port, &phy, &ch);
2048 
2049 	/* Non-SSC reference */
2050 	intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_REF_SEL);
2051 
2052 	if (IS_GEMINILAKE(i915)) {
2053 		intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port),
2054 			     0, PORT_PLL_POWER_ENABLE);
2055 
2056 		if (wait_for_us((intel_de_read(i915, BXT_PORT_PLL_ENABLE(port)) &
2057 				 PORT_PLL_POWER_STATE), 200))
2058 			drm_err(&i915->drm,
2059 				"Power state not set for PLL:%d\n", port);
2060 	}
2061 
2062 	/* Disable 10 bit clock */
2063 	intel_de_rmw(i915, BXT_PORT_PLL_EBB_4(phy, ch),
2064 		     PORT_PLL_10BIT_CLK_ENABLE, 0);
2065 
2066 	/* Write P1 & P2 */
2067 	intel_de_rmw(i915, BXT_PORT_PLL_EBB_0(phy, ch),
2068 		     PORT_PLL_P1_MASK | PORT_PLL_P2_MASK, hw_state->ebb0);
2069 
2070 	/* Write M2 integer */
2071 	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 0),
2072 		     PORT_PLL_M2_INT_MASK, hw_state->pll0);
2073 
2074 	/* Write N */
2075 	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 1),
2076 		     PORT_PLL_N_MASK, hw_state->pll1);
2077 
2078 	/* Write M2 fraction */
2079 	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 2),
2080 		     PORT_PLL_M2_FRAC_MASK, hw_state->pll2);
2081 
2082 	/* Write M2 fraction enable */
2083 	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 3),
2084 		     PORT_PLL_M2_FRAC_ENABLE, hw_state->pll3);
2085 
2086 	/* Write coeff */
2087 	temp = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 6));
2088 	temp &= ~PORT_PLL_PROP_COEFF_MASK;
2089 	temp &= ~PORT_PLL_INT_COEFF_MASK;
2090 	temp &= ~PORT_PLL_GAIN_CTL_MASK;
2091 	temp |= hw_state->pll6;
2092 	intel_de_write(i915, BXT_PORT_PLL(phy, ch, 6), temp);
2093 
2094 	/* Write calibration val */
2095 	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 8),
2096 		     PORT_PLL_TARGET_CNT_MASK, hw_state->pll8);
2097 
2098 	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 9),
2099 		     PORT_PLL_LOCK_THRESHOLD_MASK, hw_state->pll9);
2100 
2101 	temp = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 10));
2102 	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
2103 	temp &= ~PORT_PLL_DCO_AMP_MASK;
2104 	temp |= hw_state->pll10;
2105 	intel_de_write(i915, BXT_PORT_PLL(phy, ch, 10), temp);
2106 
2107 	/* Recalibrate with new settings */
2108 	temp = intel_de_read(i915, BXT_PORT_PLL_EBB_4(phy, ch));
2109 	temp |= PORT_PLL_RECALIBRATE;
2110 	intel_de_write(i915, BXT_PORT_PLL_EBB_4(phy, ch), temp);
2111 	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
2112 	temp |= hw_state->ebb4;
2113 	intel_de_write(i915, BXT_PORT_PLL_EBB_4(phy, ch), temp);
2114 
2115 	/* Enable PLL */
2116 	intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_ENABLE);
2117 	intel_de_posting_read(i915, BXT_PORT_PLL_ENABLE(port));
2118 
2119 	if (wait_for_us((intel_de_read(i915, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
2120 			200))
2121 		drm_err(&i915->drm, "PLL %d not locked\n", port);
2122 
2123 	if (IS_GEMINILAKE(i915)) {
2124 		temp = intel_de_read(i915, BXT_PORT_TX_DW5_LN(phy, ch, 0));
2125 		temp |= DCC_DELAY_RANGE_2;
2126 		intel_de_write(i915, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
2127 	}
2128 
2129 	/*
2130 	 * While we write to the group register to program all lanes at once, we
2131 	 * can only read the individual lane registers; we pick lanes 0/1 for that.
2132 	 */
2133 	temp = intel_de_read(i915, BXT_PORT_PCS_DW12_LN01(phy, ch));
2134 	temp &= ~LANE_STAGGER_MASK;
2135 	temp &= ~LANESTAGGER_STRAP_OVRD;
2136 	temp |= hw_state->pcsdw12;
2137 	intel_de_write(i915, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
2138 }
2139 
2140 static void bxt_ddi_pll_disable(struct drm_i915_private *i915,
2141 				struct intel_shared_dpll *pll)
2142 {
2143 	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
2144 
2145 	intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), PORT_PLL_ENABLE, 0);
2146 	intel_de_posting_read(i915, BXT_PORT_PLL_ENABLE(port));
2147 
2148 	if (IS_GEMINILAKE(i915)) {
2149 		intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port),
2150 			     PORT_PLL_POWER_ENABLE, 0);
2151 
2152 		if (wait_for_us(!(intel_de_read(i915, BXT_PORT_PLL_ENABLE(port)) &
2153 				  PORT_PLL_POWER_STATE), 200))
2154 			drm_err(&i915->drm,
2155 				"Power state not reset for PLL:%d\n", port);
2156 	}
2157 }
2158 
2159 static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *i915,
2160 				     struct intel_shared_dpll *pll,
2161 				     struct intel_dpll_hw_state *dpll_hw_state)
2162 {
2163 	struct intel_display *display = &i915->display;
2164 	struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
2165 	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
2166 	intel_wakeref_t wakeref;
2167 	enum dpio_phy phy;
2168 	enum dpio_channel ch;
2169 	u32 val;
2170 	bool ret;
2171 
2172 	bxt_port_to_phy_channel(display, port, &phy, &ch);
2173 
2174 	wakeref = intel_display_power_get_if_enabled(i915,
2175 						     POWER_DOMAIN_DISPLAY_CORE);
2176 	if (!wakeref)
2177 		return false;
2178 
2179 	ret = false;
2180 
2181 	val = intel_de_read(i915, BXT_PORT_PLL_ENABLE(port));
2182 	if (!(val & PORT_PLL_ENABLE))
2183 		goto out;
2184 
2185 	hw_state->ebb0 = intel_de_read(i915, BXT_PORT_PLL_EBB_0(phy, ch));
2186 	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
2187 
2188 	hw_state->ebb4 = intel_de_read(i915, BXT_PORT_PLL_EBB_4(phy, ch));
2189 	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
2190 
2191 	hw_state->pll0 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 0));
2192 	hw_state->pll0 &= PORT_PLL_M2_INT_MASK;
2193 
2194 	hw_state->pll1 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 1));
2195 	hw_state->pll1 &= PORT_PLL_N_MASK;
2196 
2197 	hw_state->pll2 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 2));
2198 	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
2199 
2200 	hw_state->pll3 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 3));
2201 	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
2202 
2203 	hw_state->pll6 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 6));
2204 	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
2205 			  PORT_PLL_INT_COEFF_MASK |
2206 			  PORT_PLL_GAIN_CTL_MASK;
2207 
2208 	hw_state->pll8 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 8));
2209 	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
2210 
2211 	hw_state->pll9 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 9));
2212 	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
2213 
2214 	hw_state->pll10 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 10));
2215 	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
2216 			   PORT_PLL_DCO_AMP_MASK;
2217 
2218 	/*
2219 	 * While we write to the group register to program all lanes at once we
2220 	 * can read only lane registers. We configure all lanes the same way, so
2221 	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
2222 	 */
2223 	hw_state->pcsdw12 = intel_de_read(i915,
2224 					  BXT_PORT_PCS_DW12_LN01(phy, ch));
2225 	if (intel_de_read(i915, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
2226 		drm_dbg(&i915->drm,
2227 			"lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
2228 			hw_state->pcsdw12,
2229 			intel_de_read(i915,
2230 				      BXT_PORT_PCS_DW12_LN23(phy, ch)));
2231 	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
2232 
2233 	ret = true;
2234 
2235 out:
2236 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
2237 
2238 	return ret;
2239 }
2240 
2241 /* pre-calculated values for DP linkrates */
2242 static const struct dpll bxt_dp_clk_val[] = {
2243 	/* m2 is .22 binary fixed point */
2244 	{ .dot = 162000, .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2245 	{ .dot = 270000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
2246 	{ .dot = 540000, .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
2247 	{ .dot = 216000, .p1 = 3, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2248 	{ .dot = 243000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6133333 /* 24.3 */ },
2249 	{ .dot = 324000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2250 	{ .dot = 432000, .p1 = 3, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2251 };
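/*
 * Decoding sketch for the .22 fixed-point m2 values above (illustrative):
 * 0x819999a / 2^22 = 135895450 / 4194304 ~= 32.4, i.e. the effective m2
 * multiplier used by the 162000/216000/324000/432000 kHz entries.
 */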
2252 
2253 static int
2254 bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
2255 			  struct dpll *clk_div)
2256 {
2257 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2258 
2259 	/* Calculate HDMI div */
2260 	/*
2261 	 * FIXME: tie the following calculation into
2262 	 * i9xx_crtc_compute_clock
2263 	 */
2264 	if (!bxt_find_best_dpll(crtc_state, clk_div))
2265 		return -EINVAL;
2266 
2267 	drm_WARN_ON(&i915->drm, clk_div->m1 != 2);
2268 
2269 	return 0;
2270 }
2271 
2272 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
2273 				    struct dpll *clk_div)
2274 {
2275 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2276 	int i;
2277 
2278 	*clk_div = bxt_dp_clk_val[0];
2279 	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
2280 		if (crtc_state->port_clock == bxt_dp_clk_val[i].dot) {
2281 			*clk_div = bxt_dp_clk_val[i];
2282 			break;
2283 		}
2284 	}
2285 
2286 	chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, clk_div);
2287 
2288 	drm_WARN_ON(&i915->drm, clk_div->vco == 0 ||
2289 		    clk_div->dot != crtc_state->port_clock);
2290 }
2291 
2292 static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
2293 				     const struct dpll *clk_div)
2294 {
2295 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2296 	struct bxt_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.bxt;
2297 	int clock = crtc_state->port_clock;
2298 	int vco = clk_div->vco;
2299 	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
2300 	u32 lanestagger;
2301 
2302 	if (vco >= 6200000 && vco <= 6700000) {
2303 		prop_coef = 4;
2304 		int_coef = 9;
2305 		gain_ctl = 3;
2306 		targ_cnt = 8;
2307 	} else if ((vco > 5400000 && vco < 6200000) ||
2308 			(vco >= 4800000 && vco < 5400000)) {
2309 		prop_coef = 5;
2310 		int_coef = 11;
2311 		gain_ctl = 3;
2312 		targ_cnt = 9;
2313 	} else if (vco == 5400000) {
2314 		prop_coef = 3;
2315 		int_coef = 8;
2316 		gain_ctl = 1;
2317 		targ_cnt = 9;
2318 	} else {
2319 		drm_err(&i915->drm, "Invalid VCO\n");
2320 		return -EINVAL;
2321 	}
2322 
2323 	if (clock > 270000)
2324 		lanestagger = 0x18;
2325 	else if (clock > 135000)
2326 		lanestagger = 0x0d;
2327 	else if (clock > 67000)
2328 		lanestagger = 0x07;
2329 	else if (clock > 33000)
2330 		lanestagger = 0x04;
2331 	else
2332 		lanestagger = 0x02;
2333 
2334 	hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
2335 	hw_state->pll0 = PORT_PLL_M2_INT(clk_div->m2 >> 22);
2336 	hw_state->pll1 = PORT_PLL_N(clk_div->n);
2337 	hw_state->pll2 = PORT_PLL_M2_FRAC(clk_div->m2 & 0x3fffff);
2338 
2339 	if (clk_div->m2 & 0x3fffff)
2340 		hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
2341 
2342 	hw_state->pll6 = PORT_PLL_PROP_COEFF(prop_coef) |
2343 		PORT_PLL_INT_COEFF(int_coef) |
2344 		PORT_PLL_GAIN_CTL(gain_ctl);
2345 
2346 	hw_state->pll8 = PORT_PLL_TARGET_CNT(targ_cnt);
2347 
2348 	hw_state->pll9 = PORT_PLL_LOCK_THRESHOLD(5);
2349 
2350 	hw_state->pll10 = PORT_PLL_DCO_AMP(15) |
2351 		PORT_PLL_DCO_AMP_OVR_EN_H;
2352 
2353 	hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
2354 
2355 	hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
2356 
2357 	return 0;
2358 }
2359 
2360 static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
2361 				const struct intel_shared_dpll *pll,
2362 				const struct intel_dpll_hw_state *dpll_hw_state)
2363 {
2364 	const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
2365 	struct dpll clock;
2366 
2367 	clock.m1 = 2;
2368 	clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, hw_state->pll0) << 22;
2369 	if (hw_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
2370 		clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK,
2371 					  hw_state->pll2);
2372 	clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, hw_state->pll1);
2373 	clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, hw_state->ebb0);
2374 	clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, hw_state->ebb0);
2375 
2376 	return chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, &clock);
2377 }
2378 
2379 static int
2380 bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2381 {
2382 	struct dpll clk_div = {};
2383 
2384 	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
2385 
2386 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2387 }
2388 
2389 static int
2390 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2391 {
2392 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2393 	struct dpll clk_div = {};
2394 	int ret;
2395 
2396 	bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
2397 
2398 	ret = bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2399 	if (ret)
2400 		return ret;
2401 
2402 	crtc_state->port_clock = bxt_ddi_pll_get_freq(i915, NULL,
2403 						      &crtc_state->dpll_hw_state);
2404 
2405 	return 0;
2406 }
2407 
2408 static int bxt_compute_dpll(struct intel_atomic_state *state,
2409 			    struct intel_crtc *crtc,
2410 			    struct intel_encoder *encoder)
2411 {
2412 	struct intel_crtc_state *crtc_state =
2413 		intel_atomic_get_new_crtc_state(state, crtc);
2414 
2415 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
2416 		return bxt_ddi_hdmi_set_dpll_hw_state(crtc_state);
2417 	else if (intel_crtc_has_dp_encoder(crtc_state))
2418 		return bxt_ddi_dp_set_dpll_hw_state(crtc_state);
2419 	else
2420 		return -EINVAL;
2421 }
2422 
2423 static int bxt_get_dpll(struct intel_atomic_state *state,
2424 			struct intel_crtc *crtc,
2425 			struct intel_encoder *encoder)
2426 {
2427 	struct intel_crtc_state *crtc_state =
2428 		intel_atomic_get_new_crtc_state(state, crtc);
2429 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2430 	struct intel_shared_dpll *pll;
2431 	enum intel_dpll_id id;
2432 
2433 	/* 1:1 mapping between ports and PLLs */
2434 	id = (enum intel_dpll_id) encoder->port;
2435 	pll = intel_get_shared_dpll_by_id(i915, id);
2436 
2437 	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
2438 		    crtc->base.base.id, crtc->base.name, pll->info->name);
2439 
2440 	intel_reference_shared_dpll(state, crtc,
2441 				    pll, &crtc_state->dpll_hw_state);
2442 
2443 	crtc_state->shared_dpll = pll;
2444 
2445 	return 0;
2446 }
2447 
2448 static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
2449 {
2450 	i915->display.dpll.ref_clks.ssc = 100000;
2451 	i915->display.dpll.ref_clks.nssc = 100000;
2452 	/* DSI non-SSC ref 19.2MHz */
2453 }
2454 
2455 static void bxt_dump_hw_state(struct drm_printer *p,
2456 			      const struct intel_dpll_hw_state *dpll_hw_state)
2457 {
2458 	const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
2459 
2460 	drm_printf(p, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x, "
2461 		   "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
2462 		   "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
2463 		   hw_state->ebb0, hw_state->ebb4,
2464 		   hw_state->pll0, hw_state->pll1, hw_state->pll2, hw_state->pll3,
2465 		   hw_state->pll6, hw_state->pll8, hw_state->pll9, hw_state->pll10,
2466 		   hw_state->pcsdw12);
2467 }
2468 
2469 static bool bxt_compare_hw_state(const struct intel_dpll_hw_state *_a,
2470 				 const struct intel_dpll_hw_state *_b)
2471 {
2472 	const struct bxt_dpll_hw_state *a = &_a->bxt;
2473 	const struct bxt_dpll_hw_state *b = &_b->bxt;
2474 
2475 	return a->ebb0 == b->ebb0 &&
2476 		a->ebb4 == b->ebb4 &&
2477 		a->pll0 == b->pll0 &&
2478 		a->pll1 == b->pll1 &&
2479 		a->pll2 == b->pll2 &&
2480 		a->pll3 == b->pll3 &&
2481 		a->pll6 == b->pll6 &&
2482 		a->pll8 == b->pll8 &&
2483 		a->pll10 == b->pll10 &&
2484 		a->pcsdw12 == b->pcsdw12;
2485 }
2486 
2487 static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
2488 	.enable = bxt_ddi_pll_enable,
2489 	.disable = bxt_ddi_pll_disable,
2490 	.get_hw_state = bxt_ddi_pll_get_hw_state,
2491 	.get_freq = bxt_ddi_pll_get_freq,
2492 };
2493 
2494 static const struct dpll_info bxt_plls[] = {
2495 	{ .name = "PORT PLL A", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL0, },
2496 	{ .name = "PORT PLL B", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL1, },
2497 	{ .name = "PORT PLL C", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL2, },
2498 	{}
2499 };
2500 
2501 static const struct intel_dpll_mgr bxt_pll_mgr = {
2502 	.dpll_info = bxt_plls,
2503 	.compute_dplls = bxt_compute_dpll,
2504 	.get_dplls = bxt_get_dpll,
2505 	.put_dplls = intel_put_dpll,
2506 	.update_ref_clks = bxt_update_dpll_ref_clks,
2507 	.dump_hw_state = bxt_dump_hw_state,
2508 	.compare_hw_state = bxt_compare_hw_state,
2509 };
2510 
2511 static void icl_wrpll_get_multipliers(int bestdiv, int *pdiv,
2512 				      int *qdiv, int *kdiv)
2513 {
2514 	/* even dividers */
2515 	if (bestdiv % 2 == 0) {
2516 		if (bestdiv == 2) {
2517 			*pdiv = 2;
2518 			*qdiv = 1;
2519 			*kdiv = 1;
2520 		} else if (bestdiv % 4 == 0) {
2521 			*pdiv = 2;
2522 			*qdiv = bestdiv / 4;
2523 			*kdiv = 2;
2524 		} else if (bestdiv % 6 == 0) {
2525 			*pdiv = 3;
2526 			*qdiv = bestdiv / 6;
2527 			*kdiv = 2;
2528 		} else if (bestdiv % 5 == 0) {
2529 			*pdiv = 5;
2530 			*qdiv = bestdiv / 10;
2531 			*kdiv = 2;
2532 		} else if (bestdiv % 14 == 0) {
2533 			*pdiv = 7;
2534 			*qdiv = bestdiv / 14;
2535 			*kdiv = 2;
2536 		}
2537 	} else {
2538 		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
2539 			*pdiv = bestdiv;
2540 			*qdiv = 1;
2541 			*kdiv = 1;
2542 		} else { /* 9, 15, 21 */
2543 			*pdiv = bestdiv / 3;
2544 			*qdiv = 1;
2545 			*kdiv = 3;
2546 		}
2547 	}
2548 }
2549 
2550 static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
2551 				      u32 dco_freq, u32 ref_freq,
2552 				      int pdiv, int qdiv, int kdiv)
2553 {
2554 	u32 dco;
2555 
2556 	switch (kdiv) {
2557 	case 1:
2558 		params->kdiv = 1;
2559 		break;
2560 	case 2:
2561 		params->kdiv = 2;
2562 		break;
2563 	case 3:
2564 		params->kdiv = 4;
2565 		break;
2566 	default:
2567 		WARN(1, "Incorrect KDiv\n");
2568 	}
2569 
2570 	switch (pdiv) {
2571 	case 2:
2572 		params->pdiv = 1;
2573 		break;
2574 	case 3:
2575 		params->pdiv = 2;
2576 		break;
2577 	case 5:
2578 		params->pdiv = 4;
2579 		break;
2580 	case 7:
2581 		params->pdiv = 8;
2582 		break;
2583 	default:
2584 		WARN(1, "Incorrect PDiv\n");
2585 	}
2586 
2587 	WARN_ON(kdiv != 2 && qdiv != 1);
2588 
2589 	params->qdiv_ratio = qdiv;
2590 	params->qdiv_mode = (qdiv == 1) ? 0 : 1;
2591 
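	/*
	 * The DCO is stored as a 15-bit fixed-point ratio of dco_freq to the
	 * reference.  Illustrative example: dco_freq = 8100000 kHz with a
	 * 24000 kHz reference gives 8100000 / 24000 = 337.5, i.e.
	 * dco_integer = 337 (0x151) and dco_fraction = 0x4000.
	 */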
2592 	dco = div_u64((u64)dco_freq << 15, ref_freq);
2593 
2594 	params->dco_integer = dco >> 15;
2595 	params->dco_fraction = dco & 0x7fff;
2596 }
2597 
2598 /*
2599  * Display WA #22010492432: ehl, tgl, adl-s, adl-p
2600  * Program half of the nominal DCO divider fraction value.
2601  */
2602 static bool
2603 ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
2604 {
2605 	return ((IS_ELKHARTLAKE(i915) &&
2606 		 IS_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
2607 		 IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) &&
2608 		 i915->display.dpll.ref_clks.nssc == 38400;
2609 }
2610 
2611 struct icl_combo_pll_params {
2612 	int clock;
2613 	struct skl_wrpll_params wrpll;
2614 };
2615 
2616 /*
2617  * These values are already adjusted: they're the bits we write to the
2618  * registers, not the logical values.
2619  */
2620 static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
2621 	{ 540000,
2622 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
2623 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2624 	{ 270000,
2625 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
2626 	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2627 	{ 162000,
2628 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
2629 	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2630 	{ 324000,
2631 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
2632 	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2633 	{ 216000,
2634 	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2635 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2636 	{ 432000,
2637 	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2638 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2639 	{ 648000,
2640 	  { .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
2641 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2642 	{ 810000,
2643 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
2644 	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2645 };
2646 
2647 
2648 /* Also used for 38.4 MHz values. */
2649 static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
2650 	{ 540000,
2651 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
2652 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2653 	{ 270000,
2654 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
2655 	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2656 	{ 162000,
2657 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
2658 	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2659 	{ 324000,
2660 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
2661 	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2662 	{ 216000,
2663 	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2664 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2665 	{ 432000,
2666 	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2667 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2668 	{ 648000,
2669 	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
2670 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2671 	{ 810000,
2672 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
2673 	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2674 };
2675 
2676 static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
2677 	.dco_integer = 0x151, .dco_fraction = 0x4000,
2678 	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2679 };
2680 
2681 static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
2682 	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
2683 	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2684 };
2685 
2686 static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
2687 	.dco_integer = 0x54, .dco_fraction = 0x3000,
2688 	/* the following params are unused */
2689 	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
2690 };
2691 
2692 static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
2693 	.dco_integer = 0x43, .dco_fraction = 0x4000,
2694 	/* the following params are unused */
2695 };
2696 
2697 static int icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2698 				 struct skl_wrpll_params *pll_params)
2699 {
2700 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2701 	const struct icl_combo_pll_params *params =
2702 		i915->display.dpll.ref_clks.nssc == 24000 ?
2703 		icl_dp_combo_pll_24MHz_values :
2704 		icl_dp_combo_pll_19_2MHz_values;
2705 	int clock = crtc_state->port_clock;
2706 	int i;
2707 
2708 	for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
2709 		if (clock == params[i].clock) {
2710 			*pll_params = params[i].wrpll;
2711 			return 0;
2712 		}
2713 	}
2714 
2715 	MISSING_CASE(clock);
2716 	return -EINVAL;
2717 }
2718 
2719 static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
2720 			    struct skl_wrpll_params *pll_params)
2721 {
2722 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2723 
2724 	if (DISPLAY_VER(i915) >= 12) {
2725 		switch (i915->display.dpll.ref_clks.nssc) {
2726 		default:
2727 			MISSING_CASE(i915->display.dpll.ref_clks.nssc);
2728 			fallthrough;
2729 		case 19200:
2730 		case 38400:
2731 			*pll_params = tgl_tbt_pll_19_2MHz_values;
2732 			break;
2733 		case 24000:
2734 			*pll_params = tgl_tbt_pll_24MHz_values;
2735 			break;
2736 		}
2737 	} else {
2738 		switch (i915->display.dpll.ref_clks.nssc) {
2739 		default:
2740 			MISSING_CASE(i915->display.dpll.ref_clks.nssc);
2741 			fallthrough;
2742 		case 19200:
2743 		case 38400:
2744 			*pll_params = icl_tbt_pll_19_2MHz_values;
2745 			break;
2746 		case 24000:
2747 			*pll_params = icl_tbt_pll_24MHz_values;
2748 			break;
2749 		}
2750 	}
2751 
2752 	return 0;
2753 }
2754 
2755 static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
2756 				    const struct intel_shared_dpll *pll,
2757 				    const struct intel_dpll_hw_state *dpll_hw_state)
2758 {
2759 	/*
2760 	 * The PLL outputs multiple frequencies at the same time; the selection
2761 	 * is made at the DDI clock mux level.
2762 	 */
2763 	drm_WARN_ON(&i915->drm, 1);
2764 
2765 	return 0;
2766 }
2767 
2768 static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
2769 {
2770 	int ref_clock = i915->display.dpll.ref_clks.nssc;
2771 
2772 	/*
2773 	 * For ICL+, the spec states: if reference frequency is 38.4,
2774 	 * use 19.2 because the DPLL automatically divides that by 2.
2775 	 */
2776 	if (ref_clock == 38400)
2777 		ref_clock = 19200;
2778 
2779 	return ref_clock;
2780 }
2781 
2782 static int
2783 icl_calc_wrpll(struct intel_crtc_state *crtc_state,
2784 	       struct skl_wrpll_params *wrpll_params)
2785 {
2786 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2787 	int ref_clock = icl_wrpll_ref_clock(i915);
2788 	u32 afe_clock = crtc_state->port_clock * 5;
2789 	u32 dco_min = 7998000;
2790 	u32 dco_max = 10000000;
2791 	u32 dco_mid = (dco_min + dco_max) / 2;
2792 	static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
2793 					 18, 20, 24, 28, 30, 32,  36,  40,
2794 					 42, 44, 48, 50, 52, 54,  56,  60,
2795 					 64, 66, 68, 70, 72, 76,  78,  80,
2796 					 84, 88, 90, 92, 96, 98, 100, 102,
2797 					  3,  5,  7,  9, 15, 21 };
2798 	u32 dco, best_dco = 0, dco_centrality = 0;
2799 	u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2800 	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2801 
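	/*
	 * Pick the divider whose DCO lands closest to the middle of the
	 * allowed band.  Illustrative example: a 148500 kHz port clock gives
	 * afe_clock = 742500 kHz; only divider 12 puts the DCO in range
	 * (742500 * 12 = 8910000 kHz), so best_div = 12, which
	 * icl_wrpll_get_multipliers() then splits into pdiv = 2, qdiv = 3,
	 * kdiv = 2.
	 */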
2802 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2803 		dco = afe_clock * dividers[d];
2804 
2805 		if (dco <= dco_max && dco >= dco_min) {
2806 			dco_centrality = abs(dco - dco_mid);
2807 
2808 			if (dco_centrality < best_dco_centrality) {
2809 				best_dco_centrality = dco_centrality;
2810 				best_div = dividers[d];
2811 				best_dco = dco;
2812 			}
2813 		}
2814 	}
2815 
2816 	if (best_div == 0)
2817 		return -EINVAL;
2818 
2819 	icl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2820 	icl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
2821 				  pdiv, qdiv, kdiv);
2822 
2823 	return 0;
2824 }
2825 
2826 static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
2827 				      const struct intel_shared_dpll *pll,
2828 				      const struct intel_dpll_hw_state *dpll_hw_state)
2829 {
2830 	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
2831 	int ref_clock = icl_wrpll_ref_clock(i915);
2832 	u32 dco_fraction;
2833 	u32 p0, p1, p2, dco_freq;
2834 
2835 	p0 = hw_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
2836 	p2 = hw_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
2837 
2838 	if (hw_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
2839 		p1 = (hw_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
2840 			DPLL_CFGCR1_QDIV_RATIO_SHIFT;
2841 	else
2842 		p1 = 1;
2843 
2844 	switch (p0) {
2845 	case DPLL_CFGCR1_PDIV_2:
2846 		p0 = 2;
2847 		break;
2848 	case DPLL_CFGCR1_PDIV_3:
2849 		p0 = 3;
2850 		break;
2851 	case DPLL_CFGCR1_PDIV_5:
2852 		p0 = 5;
2853 		break;
2854 	case DPLL_CFGCR1_PDIV_7:
2855 		p0 = 7;
2856 		break;
2857 	}
2858 
2859 	switch (p2) {
2860 	case DPLL_CFGCR1_KDIV_1:
2861 		p2 = 1;
2862 		break;
2863 	case DPLL_CFGCR1_KDIV_2:
2864 		p2 = 2;
2865 		break;
2866 	case DPLL_CFGCR1_KDIV_3:
2867 		p2 = 3;
2868 		break;
2869 	}
2870 
2871 	dco_freq = (hw_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
2872 		   ref_clock;
2873 
2874 	dco_fraction = (hw_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
2875 		       DPLL_CFGCR0_DCO_FRACTION_SHIFT;
2876 
2877 	if (ehl_combo_pll_div_frac_wa_needed(i915))
2878 		dco_fraction *= 2;
2879 
2880 	dco_freq += (dco_fraction * ref_clock) / 0x8000;
2881 
2882 	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
2883 		return 0;
2884 
2885 	return dco_freq / (p0 * p1 * p2 * 5);
2886 }
2887 
2888 static void icl_calc_dpll_state(struct drm_i915_private *i915,
2889 				const struct skl_wrpll_params *pll_params,
2890 				struct intel_dpll_hw_state *dpll_hw_state)
2891 {
2892 	struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
2893 	u32 dco_fraction = pll_params->dco_fraction;
2894 
2895 	if (ehl_combo_pll_div_frac_wa_needed(i915))
2896 		dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);
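	/*
	 * Illustrative effect of the WA: a nominal dco_fraction of 0x4000 is
	 * programmed as 0x2000 on affected parts; icl_ddi_combo_pll_get_freq()
	 * doubles it again on readout, so the computed frequency is unchanged.
	 */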
2897 
2898 	hw_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
2899 			    pll_params->dco_integer;
2900 
2901 	hw_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
2902 			    DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
2903 			    DPLL_CFGCR1_KDIV(pll_params->kdiv) |
2904 			    DPLL_CFGCR1_PDIV(pll_params->pdiv);
2905 
2906 	if (DISPLAY_VER(i915) >= 12)
2907 		hw_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
2908 	else
2909 		hw_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
2910 
2911 	if (i915->display.vbt.override_afc_startup)
2912 		hw_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->display.vbt.override_afc_startup_val);
2913 }
2914 
2915 static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
2916 				    u32 *target_dco_khz,
2917 				    struct icl_dpll_hw_state *hw_state,
2918 				    bool is_dkl)
2919 {
2920 	static const u8 div1_vals[] = { 7, 5, 3, 2 };
2921 	u32 dco_min_freq, dco_max_freq;
2922 	unsigned int i;
2923 	int div2;
2924 
2925 	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
2926 	dco_max_freq = is_dp ? 8100000 : 10000000;
2927 
2928 	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
2929 		int div1 = div1_vals[i];
2930 
2931 		for (div2 = 10; div2 > 0; div2--) {
2932 			int dco = div1 * div2 * clock_khz * 5;
2933 			int a_divratio, tlinedrv, inputsel;
2934 			u32 hsdiv;
2935 
2936 			if (dco < dco_min_freq || dco > dco_max_freq)
2937 				continue;
2938 
2939 			if (div2 >= 2) {
2940 				/*
2941 				 * Note: a_divratio doesn't match the TGL BSpec
2942 				 * algorithm, but it matches the hardcoded values
2943 				 * and works on HW, at least for DP alt-mode.
2944 				 */
2945 				a_divratio = is_dp ? 10 : 5;
2946 				tlinedrv = is_dkl ? 1 : 2;
2947 			} else {
2948 				a_divratio = 5;
2949 				tlinedrv = 0;
2950 			}
2951 			inputsel = is_dp ? 0 : 1;
2952 
2953 			switch (div1) {
2954 			default:
2955 				MISSING_CASE(div1);
2956 				fallthrough;
2957 			case 2:
2958 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
2959 				break;
2960 			case 3:
2961 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
2962 				break;
2963 			case 5:
2964 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
2965 				break;
2966 			case 7:
2967 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
2968 				break;
2969 			}
2970 
2971 			*target_dco_khz = dco;
2972 
2973 			hw_state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
2974 
2975 			hw_state->mg_clktop2_coreclkctl1 =
2976 				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
2977 
2978 			hw_state->mg_clktop2_hsclkctl =
2979 				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
2980 				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
2981 				hsdiv |
2982 				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
2983 
2984 			return 0;
2985 		}
2986 	}
2987 
2988 	return -EINVAL;
2989 }
2990 
2991 /*
2992  * The specification for this function uses real numbers, so the math had to be
2993  * adapted to an integer-only calculation, which is why it looks so different.
2994  */
2995 static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
2996 				 struct intel_dpll_hw_state *dpll_hw_state)
2997 {
2998 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2999 	struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3000 	int refclk_khz = i915->display.dpll.ref_clks.nssc;
3001 	int clock = crtc_state->port_clock;
3002 	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
3003 	u32 iref_ndiv, iref_trim, iref_pulse_w;
3004 	u32 prop_coeff, int_coeff;
3005 	u32 tdc_targetcnt, feedfwgain;
3006 	u64 ssc_stepsize, ssc_steplen, ssc_steplog;
3007 	u64 tmp;
3008 	bool use_ssc = false;
3009 	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
3010 	bool is_dkl = DISPLAY_VER(i915) >= 12;
3011 	int ret;
3012 
3013 	ret = icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
3014 				       hw_state, is_dkl);
3015 	if (ret)
3016 		return ret;
3017 
3018 	m1div = 2;
3019 	m2div_int = dco_khz / (refclk_khz * m1div);
3020 	if (m2div_int > 255) {
3021 		if (!is_dkl) {
3022 			m1div = 4;
3023 			m2div_int = dco_khz / (refclk_khz * m1div);
3024 		}
3025 
3026 		if (m2div_int > 255)
3027 			return -EINVAL;
3028 	}
3029 	m2div_rem = dco_khz % (refclk_khz * m1div);
3030 
3031 	tmp = (u64)m2div_rem * (1 << 22);
3032 	do_div(tmp, refclk_khz * m1div);
3033 	m2div_frac = tmp;
3034 
3035 	switch (refclk_khz) {
3036 	case 19200:
3037 		iref_ndiv = 1;
3038 		iref_trim = 28;
3039 		iref_pulse_w = 1;
3040 		break;
3041 	case 24000:
3042 		iref_ndiv = 1;
3043 		iref_trim = 25;
3044 		iref_pulse_w = 2;
3045 		break;
3046 	case 38400:
3047 		iref_ndiv = 2;
3048 		iref_trim = 28;
3049 		iref_pulse_w = 1;
3050 		break;
3051 	default:
3052 		MISSING_CASE(refclk_khz);
3053 		return -EINVAL;
3054 	}
3055 
3056 	/*
3057 	 * tdc_res = 0.000003
3058 	 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
3059 	 *
3060 	 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
3061 	 * was supposed to be a division, but we rearranged the operations of
3062 	 * the formula to avoid early divisions so we don't multiply the
3063 	 * rounding errors.
3064 	 *
3065 	 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
3066 	 * we also rearrange to work with integers.
3067 	 *
3068 	 * The 0.5 transformed to 5 results in a multiplication by 10 and the
3069 	 * last division by 10.
3070 	 */
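	/*
	 * Illustrative evaluation: with refclk_khz = 24000 the expression
	 * below is (2000000000 / 3168000 + 5) / 10 = (631 + 5) / 10 = 63;
	 * with refclk_khz = 38400 it comes out to 39.
	 */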
3071 	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
3072 
3073 	/*
3074 	 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
3075 	 * 32 bits. That's not a problem since we round the division down
3076 	 * anyway.
3077 	 */
3078 	feedfwgain = (use_ssc || m2div_rem > 0) ?
3079 		m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
3080 
3081 	if (dco_khz >= 9000000) {
3082 		prop_coeff = 5;
3083 		int_coeff = 10;
3084 	} else {
3085 		prop_coeff = 4;
3086 		int_coeff = 8;
3087 	}
3088 
3089 	if (use_ssc) {
3090 		tmp = mul_u32_u32(dco_khz, 47 * 32);
3091 		do_div(tmp, refclk_khz * m1div * 10000);
3092 		ssc_stepsize = tmp;
3093 
3094 		tmp = mul_u32_u32(dco_khz, 1000);
3095 		ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
3096 	} else {
3097 		ssc_stepsize = 0;
3098 		ssc_steplen = 0;
3099 	}
3100 	ssc_steplog = 4;
3101 
3102 	/* write pll_state calculations */
3103 	if (is_dkl) {
3104 		hw_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
3105 					 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
3106 					 DKL_PLL_DIV0_FBPREDIV(m1div) |
3107 					 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
3108 		if (i915->display.vbt.override_afc_startup) {
3109 			u8 val = i915->display.vbt.override_afc_startup_val;
3110 
3111 			hw_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val);
3112 		}
3113 
3114 		hw_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
3115 					 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
3116 
3117 		hw_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
3118 					DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
3119 					DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
3120 					(use_ssc ? DKL_PLL_SSC_EN : 0);
3121 
3122 		hw_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
3123 					  DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
3124 
3125 		hw_state->mg_pll_tdc_coldst_bias =
3126 				DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
3127 				DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
3128 
3129 	} else {
3130 		hw_state->mg_pll_div0 =
3131 			(m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
3132 			MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
3133 			MG_PLL_DIV0_FBDIV_INT(m2div_int);
3134 
3135 		hw_state->mg_pll_div1 =
3136 			MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
3137 			MG_PLL_DIV1_DITHER_DIV_2 |
3138 			MG_PLL_DIV1_NDIVRATIO(1) |
3139 			MG_PLL_DIV1_FBPREDIV(m1div);
3140 
3141 		hw_state->mg_pll_lf =
3142 			MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
3143 			MG_PLL_LF_AFCCNTSEL_512 |
3144 			MG_PLL_LF_GAINCTRL(1) |
3145 			MG_PLL_LF_INT_COEFF(int_coeff) |
3146 			MG_PLL_LF_PROP_COEFF(prop_coeff);
3147 
3148 		hw_state->mg_pll_frac_lock =
3149 			MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
3150 			MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
3151 			MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
3152 			MG_PLL_FRAC_LOCK_DCODITHEREN |
3153 			MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
3154 		if (use_ssc || m2div_rem > 0)
3155 			hw_state->mg_pll_frac_lock |=
3156 				MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
3157 
3158 		hw_state->mg_pll_ssc =
3159 			(use_ssc ? MG_PLL_SSC_EN : 0) |
3160 			MG_PLL_SSC_TYPE(2) |
3161 			MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
3162 			MG_PLL_SSC_STEPNUM(ssc_steplog) |
3163 			MG_PLL_SSC_FLLEN |
3164 			MG_PLL_SSC_STEPSIZE(ssc_stepsize);
3165 
3166 		hw_state->mg_pll_tdc_coldst_bias =
3167 			MG_PLL_TDC_COLDST_COLDSTART |
3168 			MG_PLL_TDC_COLDST_IREFINT_EN |
3169 			MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
3170 			MG_PLL_TDC_TDCOVCCORR_EN |
3171 			MG_PLL_TDC_TDCSEL(3);
3172 
3173 		hw_state->mg_pll_bias =
3174 			MG_PLL_BIAS_BIAS_GB_SEL(3) |
3175 			MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
3176 			MG_PLL_BIAS_BIAS_BONUS(10) |
3177 			MG_PLL_BIAS_BIASCAL_EN |
3178 			MG_PLL_BIAS_CTRIM(12) |
3179 			MG_PLL_BIAS_VREF_RDAC(4) |
3180 			MG_PLL_BIAS_IREFTRIM(iref_trim);
3181 
3182 		if (refclk_khz == 38400) {
3183 			hw_state->mg_pll_tdc_coldst_bias_mask =
3184 				MG_PLL_TDC_COLDST_COLDSTART;
3185 			hw_state->mg_pll_bias_mask = 0;
3186 		} else {
3187 			hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
3188 			hw_state->mg_pll_bias_mask = -1U;
3189 		}
3190 
3191 		hw_state->mg_pll_tdc_coldst_bias &=
3192 			hw_state->mg_pll_tdc_coldst_bias_mask;
3193 		hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
3194 	}
3195 
3196 	return 0;
3197 }
3198 
3199 static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *i915,
3200 				   const struct intel_shared_dpll *pll,
3201 				   const struct intel_dpll_hw_state *dpll_hw_state)
3202 {
3203 	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3204 	u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
3205 	u64 tmp;
3206 
3207 	ref_clock = i915->display.dpll.ref_clks.nssc;
3208 
3209 	if (DISPLAY_VER(i915) >= 12) {
3210 		m1 = hw_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
3211 		m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
3212 		m2_int = hw_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
3213 
3214 		if (hw_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
3215 			m2_frac = hw_state->mg_pll_bias &
3216 				  DKL_PLL_BIAS_FBDIV_FRAC_MASK;
3217 			m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
3218 		} else {
3219 			m2_frac = 0;
3220 		}
3221 	} else {
3222 		m1 = hw_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
3223 		m2_int = hw_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
3224 
3225 		if (hw_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
3226 			m2_frac = hw_state->mg_pll_div0 &
3227 				  MG_PLL_DIV0_FBDIV_FRAC_MASK;
3228 			m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
3229 		} else {
3230 			m2_frac = 0;
3231 		}
3232 	}
3233 
3234 	switch (hw_state->mg_clktop2_hsclkctl &
3235 		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
3236 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
3237 		div1 = 2;
3238 		break;
3239 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
3240 		div1 = 3;
3241 		break;
3242 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
3243 		div1 = 5;
3244 		break;
3245 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
3246 		div1 = 7;
3247 		break;
3248 	default:
3249 		MISSING_CASE(hw_state->mg_clktop2_hsclkctl);
3250 		return 0;
3251 	}
3252 
3253 	div2 = (hw_state->mg_clktop2_hsclkctl &
3254 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
3255 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;
3256 
3257 	/* A div2 value of 0 is the same as 1, i.e. no division */
3258 	if (div2 == 0)
3259 		div2 = 1;
3260 
3261 	/*
3262 	 * Adjust the original formula to delay the division by 2^22 in order to
3263 	 * minimize possible rounding errors.
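	 *
	 * As a sketch (derived from the code below, not quoted from bspec):
	 *
	 *   port_clock = ref_clock * m1 * (m2_int + m2_frac / 2^22) /
	 *		  (5 * div1 * div2)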
3264 	 */
3265 	tmp = (u64)m1 * m2_int * ref_clock +
3266 	      (((u64)m1 * m2_frac * ref_clock) >> 22);
3267 	tmp = div_u64(tmp, 5 * div1 * div2);
3268 
3269 	return tmp;
3270 }
3271 
3272 /**
3273  * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
3274  * @crtc_state: state for the CRTC to select the DPLL for
3275  * @port_dpll_id: the active @port_dpll_id to select
3276  *
3277  * Select the given @port_dpll_id instance from the DPLLs reserved for the
3278  * CRTC.
3279  */
3280 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
3281 			      enum icl_port_dpll_id port_dpll_id)
3282 {
3283 	struct icl_port_dpll *port_dpll =
3284 		&crtc_state->icl_port_dplls[port_dpll_id];
3285 
3286 	crtc_state->shared_dpll = port_dpll->pll;
3287 	crtc_state->dpll_hw_state = port_dpll->hw_state;
3288 }
3289 
3290 static void icl_update_active_dpll(struct intel_atomic_state *state,
3291 				   struct intel_crtc *crtc,
3292 				   struct intel_encoder *encoder)
3293 {
3294 	struct intel_crtc_state *crtc_state =
3295 		intel_atomic_get_new_crtc_state(state, crtc);
3296 	struct intel_digital_port *primary_port;
3297 	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3298 
3299 	primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3300 		enc_to_mst(encoder)->primary :
3301 		enc_to_dig_port(encoder);
3302 
3303 	if (primary_port &&
3304 	    (intel_tc_port_in_dp_alt_mode(primary_port) ||
3305 	     intel_tc_port_in_legacy_mode(primary_port)))
3306 		port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3307 
3308 	icl_set_active_port_dpll(crtc_state, port_dpll_id);
3309 }
3310 
3311 static int icl_compute_combo_phy_dpll(struct intel_atomic_state *state,
3312 				      struct intel_crtc *crtc)
3313 {
3314 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
3315 	struct intel_crtc_state *crtc_state =
3316 		intel_atomic_get_new_crtc_state(state, crtc);
3317 	struct icl_port_dpll *port_dpll =
3318 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3319 	struct skl_wrpll_params pll_params = {};
3320 	int ret;
3321 
3322 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
3323 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
3324 		ret = icl_calc_wrpll(crtc_state, &pll_params);
3325 	else
3326 		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
3327 
3328 	if (ret)
3329 		return ret;
3330 
3331 	icl_calc_dpll_state(i915, &pll_params, &port_dpll->hw_state);
3332 
3333 	/* this is mainly for the fastset check */
3334 	icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
3335 
3336 	crtc_state->port_clock = icl_ddi_combo_pll_get_freq(i915, NULL,
3337 							    &port_dpll->hw_state);
3338 
3339 	return 0;
3340 }
3341 
3342 static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
3343 				  struct intel_crtc *crtc,
3344 				  struct intel_encoder *encoder)
3345 {
3346 	struct intel_display *display = to_intel_display(crtc);
3347 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
3348 	struct intel_crtc_state *crtc_state =
3349 		intel_atomic_get_new_crtc_state(state, crtc);
3350 	struct icl_port_dpll *port_dpll =
3351 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3352 	enum port port = encoder->port;
3353 	unsigned long dpll_mask;
3354 
3355 	if (IS_ALDERLAKE_S(i915)) {
3356 		dpll_mask =
3357 			BIT(DPLL_ID_DG1_DPLL3) |
3358 			BIT(DPLL_ID_DG1_DPLL2) |
3359 			BIT(DPLL_ID_ICL_DPLL1) |
3360 			BIT(DPLL_ID_ICL_DPLL0);
3361 	} else if (IS_DG1(i915)) {
3362 		if (port == PORT_D || port == PORT_E) {
3363 			dpll_mask =
3364 				BIT(DPLL_ID_DG1_DPLL2) |
3365 				BIT(DPLL_ID_DG1_DPLL3);
3366 		} else {
3367 			dpll_mask =
3368 				BIT(DPLL_ID_DG1_DPLL0) |
3369 				BIT(DPLL_ID_DG1_DPLL1);
3370 		}
3371 	} else if (IS_ROCKETLAKE(i915)) {
3372 		dpll_mask =
3373 			BIT(DPLL_ID_EHL_DPLL4) |
3374 			BIT(DPLL_ID_ICL_DPLL1) |
3375 			BIT(DPLL_ID_ICL_DPLL0);
3376 	} else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
3377 		   port != PORT_A) {
3378 		dpll_mask =
3379 			BIT(DPLL_ID_EHL_DPLL4) |
3380 			BIT(DPLL_ID_ICL_DPLL1) |
3381 			BIT(DPLL_ID_ICL_DPLL0);
3382 	} else {
3383 		dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
3384 	}
3385 
3386 	/* Eliminate DPLLs from consideration if reserved by HTI */
3387 	dpll_mask &= ~intel_hti_dpll_mask(display);
3388 
3389 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3390 						&port_dpll->hw_state,
3391 						dpll_mask);
3392 	if (!port_dpll->pll)
3393 		return -EINVAL;
3394 
3395 	intel_reference_shared_dpll(state, crtc,
3396 				    port_dpll->pll, &port_dpll->hw_state);
3397 
3398 	icl_update_active_dpll(state, crtc, encoder);
3399 
3400 	return 0;
3401 }
3402 
3403 static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
3404 				    struct intel_crtc *crtc)
3405 {
3406 	struct drm_i915_private *i915 = to_i915(state->base.dev);
3407 	struct intel_crtc_state *crtc_state =
3408 		intel_atomic_get_new_crtc_state(state, crtc);
3409 	const struct intel_crtc_state *old_crtc_state =
3410 		intel_atomic_get_old_crtc_state(state, crtc);
3411 	struct icl_port_dpll *port_dpll =
3412 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3413 	struct skl_wrpll_params pll_params = {};
3414 	int ret;
3415 
3416 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3417 	ret = icl_calc_tbt_pll(crtc_state, &pll_params);
3418 	if (ret)
3419 		return ret;
3420 
3421 	icl_calc_dpll_state(i915, &pll_params, &port_dpll->hw_state);
3422 
3423 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3424 	ret = icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state);
3425 	if (ret)
3426 		return ret;
3427 
3428 	/* this is mainly for the fastset check */
3429 	if (old_crtc_state->shared_dpll &&
3430 	    old_crtc_state->shared_dpll->info->id == DPLL_ID_ICL_TBTPLL)
3431 		icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
3432 	else
3433 		icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);
3434 
3435 	crtc_state->port_clock = icl_ddi_mg_pll_get_freq(i915, NULL,
3436 							 &port_dpll->hw_state);
3437 
3438 	return 0;
3439 }
3440 
3441 static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
3442 				struct intel_crtc *crtc,
3443 				struct intel_encoder *encoder)
3444 {
3445 	struct intel_crtc_state *crtc_state =
3446 		intel_atomic_get_new_crtc_state(state, crtc);
3447 	struct icl_port_dpll *port_dpll =
3448 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3449 	enum intel_dpll_id dpll_id;
3450 	int ret;
3451 
3452 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3453 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3454 						&port_dpll->hw_state,
3455 						BIT(DPLL_ID_ICL_TBTPLL));
3456 	if (!port_dpll->pll)
3457 		return -EINVAL;
3458 	intel_reference_shared_dpll(state, crtc,
3459 				    port_dpll->pll, &port_dpll->hw_state);
3460 
3461 
3462 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3463 	dpll_id = icl_tc_port_to_pll_id(intel_encoder_to_tc(encoder));
3464 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3465 						&port_dpll->hw_state,
3466 						BIT(dpll_id));
3467 	if (!port_dpll->pll) {
3468 		ret = -EINVAL;
3469 		goto err_unreference_tbt_pll;
3470 	}
3471 	intel_reference_shared_dpll(state, crtc,
3472 				    port_dpll->pll, &port_dpll->hw_state);
3473 
3474 	icl_update_active_dpll(state, crtc, encoder);
3475 
3476 	return 0;
3477 
3478 err_unreference_tbt_pll:
3479 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3480 	intel_unreference_shared_dpll(state, crtc, port_dpll->pll);
3481 
3482 	return ret;
3483 }
3484 
3485 static int icl_compute_dplls(struct intel_atomic_state *state,
3486 			     struct intel_crtc *crtc,
3487 			     struct intel_encoder *encoder)
3488 {
3489 	if (intel_encoder_is_combo(encoder))
3490 		return icl_compute_combo_phy_dpll(state, crtc);
3491 	else if (intel_encoder_is_tc(encoder))
3492 		return icl_compute_tc_phy_dplls(state, crtc);
3493 
3494 	MISSING_CASE(encoder->port);
3495 
3496 	return 0;
3497 }
3498 
3499 static int icl_get_dplls(struct intel_atomic_state *state,
3500 			 struct intel_crtc *crtc,
3501 			 struct intel_encoder *encoder)
3502 {
3503 	if (intel_encoder_is_combo(encoder))
3504 		return icl_get_combo_phy_dpll(state, crtc, encoder);
3505 	else if (intel_encoder_is_tc(encoder))
3506 		return icl_get_tc_phy_dplls(state, crtc, encoder);
3507 
3508 	MISSING_CASE(encoder->port);
3509 
3510 	return -EINVAL;
3511 }
3512 
3513 static void icl_put_dplls(struct intel_atomic_state *state,
3514 			  struct intel_crtc *crtc)
3515 {
3516 	const struct intel_crtc_state *old_crtc_state =
3517 		intel_atomic_get_old_crtc_state(state, crtc);
3518 	struct intel_crtc_state *new_crtc_state =
3519 		intel_atomic_get_new_crtc_state(state, crtc);
3520 	enum icl_port_dpll_id id;
3521 
3522 	new_crtc_state->shared_dpll = NULL;
3523 
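	/*
	 * Drop the references on every per-port DPLL (both the default/TBT
	 * and the MG/TC one), not just the DPLL that was actively in use.
	 */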
3524 	for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3525 		const struct icl_port_dpll *old_port_dpll =
3526 			&old_crtc_state->icl_port_dplls[id];
3527 		struct icl_port_dpll *new_port_dpll =
3528 			&new_crtc_state->icl_port_dplls[id];
3529 
3530 		new_port_dpll->pll = NULL;
3531 
3532 		if (!old_port_dpll->pll)
3533 			continue;
3534 
3535 		intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3536 	}
3537 }
3538 
3539 static bool mg_pll_get_hw_state(struct drm_i915_private *i915,
3540 				struct intel_shared_dpll *pll,
3541 				struct intel_dpll_hw_state *dpll_hw_state)
3542 {
3543 	struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3544 	const enum intel_dpll_id id = pll->info->id;
3545 	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3546 	intel_wakeref_t wakeref;
3547 	bool ret = false;
3548 	u32 val;
3549 
3550 	i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll);
3551 
3552 	wakeref = intel_display_power_get_if_enabled(i915,
3553 						     POWER_DOMAIN_DISPLAY_CORE);
3554 	if (!wakeref)
3555 		return false;
3556 
3557 	val = intel_de_read(i915, enable_reg);
3558 	if (!(val & PLL_ENABLE))
3559 		goto out;
3560 
3561 	hw_state->mg_refclkin_ctl = intel_de_read(i915,
3562 						  MG_REFCLKIN_CTL(tc_port));
3563 	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3564 
3565 	hw_state->mg_clktop2_coreclkctl1 =
3566 		intel_de_read(i915, MG_CLKTOP2_CORECLKCTL1(tc_port));
3567 	hw_state->mg_clktop2_coreclkctl1 &=
3568 		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3569 
3570 	hw_state->mg_clktop2_hsclkctl =
3571 		intel_de_read(i915, MG_CLKTOP2_HSCLKCTL(tc_port));
3572 	hw_state->mg_clktop2_hsclkctl &=
3573 		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3574 		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3575 		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3576 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3577 
3578 	hw_state->mg_pll_div0 = intel_de_read(i915, MG_PLL_DIV0(tc_port));
3579 	hw_state->mg_pll_div1 = intel_de_read(i915, MG_PLL_DIV1(tc_port));
3580 	hw_state->mg_pll_lf = intel_de_read(i915, MG_PLL_LF(tc_port));
3581 	hw_state->mg_pll_frac_lock = intel_de_read(i915,
3582 						   MG_PLL_FRAC_LOCK(tc_port));
3583 	hw_state->mg_pll_ssc = intel_de_read(i915, MG_PLL_SSC(tc_port));
3584 
3585 	hw_state->mg_pll_bias = intel_de_read(i915, MG_PLL_BIAS(tc_port));
3586 	hw_state->mg_pll_tdc_coldst_bias =
3587 		intel_de_read(i915, MG_PLL_TDC_COLDST_BIAS(tc_port));
3588 
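	/*
	 * Mask selection here mirrors icl_calc_mg_pll_state(), so that the
	 * readout compares cleanly against the computed state for this refclk.
	 */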
3589 	if (i915->display.dpll.ref_clks.nssc == 38400) {
3590 		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
3591 		hw_state->mg_pll_bias_mask = 0;
3592 	} else {
3593 		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
3594 		hw_state->mg_pll_bias_mask = -1U;
3595 	}
3596 
3597 	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
3598 	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
3599 
3600 	ret = true;
3601 out:
3602 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3603 	return ret;
3604 }
3605 
3606 static bool dkl_pll_get_hw_state(struct drm_i915_private *i915,
3607 				 struct intel_shared_dpll *pll,
3608 				 struct intel_dpll_hw_state *dpll_hw_state)
3609 {
3610 	struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3611 	const enum intel_dpll_id id = pll->info->id;
3612 	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3613 	intel_wakeref_t wakeref;
3614 	bool ret = false;
3615 	u32 val;
3616 
3617 	wakeref = intel_display_power_get_if_enabled(i915,
3618 						     POWER_DOMAIN_DISPLAY_CORE);
3619 	if (!wakeref)
3620 		return false;
3621 
3622 	val = intel_de_read(i915, intel_tc_pll_enable_reg(i915, pll));
3623 	if (!(val & PLL_ENABLE))
3624 		goto out;
3625 
3626 	/*
3627 	 * All registers read here have the same HIP_INDEX_REG even though
3628 	 * they are on different building blocks
3629 	 */
3630 	hw_state->mg_refclkin_ctl = intel_dkl_phy_read(i915,
3631 						       DKL_REFCLKIN_CTL(tc_port));
3632 	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3633 
3634 	hw_state->mg_clktop2_hsclkctl =
3635 		intel_dkl_phy_read(i915, DKL_CLKTOP2_HSCLKCTL(tc_port));
3636 	hw_state->mg_clktop2_hsclkctl &=
3637 		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3638 		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3639 		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3640 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3641 
3642 	hw_state->mg_clktop2_coreclkctl1 =
3643 		intel_dkl_phy_read(i915, DKL_CLKTOP2_CORECLKCTL1(tc_port));
3644 	hw_state->mg_clktop2_coreclkctl1 &=
3645 		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3646 
3647 	hw_state->mg_pll_div0 = intel_dkl_phy_read(i915, DKL_PLL_DIV0(tc_port));
3648 	val = DKL_PLL_DIV0_MASK;
3649 	if (i915->display.vbt.override_afc_startup)
3650 		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
3651 	hw_state->mg_pll_div0 &= val;
3652 
3653 	hw_state->mg_pll_div1 = intel_dkl_phy_read(i915, DKL_PLL_DIV1(tc_port));
3654 	hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
3655 				  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
3656 
3657 	hw_state->mg_pll_ssc = intel_dkl_phy_read(i915, DKL_PLL_SSC(tc_port));
3658 	hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
3659 				 DKL_PLL_SSC_STEP_LEN_MASK |
3660 				 DKL_PLL_SSC_STEP_NUM_MASK |
3661 				 DKL_PLL_SSC_EN);
3662 
3663 	hw_state->mg_pll_bias = intel_dkl_phy_read(i915, DKL_PLL_BIAS(tc_port));
3664 	hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
3665 				  DKL_PLL_BIAS_FBDIV_FRAC_MASK);
3666 
3667 	hw_state->mg_pll_tdc_coldst_bias =
3668 		intel_dkl_phy_read(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3669 	hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
3670 					     DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
3671 
3672 	ret = true;
3673 out:
3674 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3675 	return ret;
3676 }
3677 
3678 static bool icl_pll_get_hw_state(struct drm_i915_private *i915,
3679 				 struct intel_shared_dpll *pll,
3680 				 struct intel_dpll_hw_state *dpll_hw_state,
3681 				 i915_reg_t enable_reg)
3682 {
3683 	struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3684 	const enum intel_dpll_id id = pll->info->id;
3685 	intel_wakeref_t wakeref;
3686 	bool ret = false;
3687 	u32 val;
3688 
3689 	wakeref = intel_display_power_get_if_enabled(i915,
3690 						     POWER_DOMAIN_DISPLAY_CORE);
3691 	if (!wakeref)
3692 		return false;
3693 
3694 	val = intel_de_read(i915, enable_reg);
3695 	if (!(val & PLL_ENABLE))
3696 		goto out;
3697 
3698 	if (IS_ALDERLAKE_S(i915)) {
3699 		hw_state->cfgcr0 = intel_de_read(i915, ADLS_DPLL_CFGCR0(id));
3700 		hw_state->cfgcr1 = intel_de_read(i915, ADLS_DPLL_CFGCR1(id));
3701 	} else if (IS_DG1(i915)) {
3702 		hw_state->cfgcr0 = intel_de_read(i915, DG1_DPLL_CFGCR0(id));
3703 		hw_state->cfgcr1 = intel_de_read(i915, DG1_DPLL_CFGCR1(id));
3704 	} else if (IS_ROCKETLAKE(i915)) {
3705 		hw_state->cfgcr0 = intel_de_read(i915,
3706 						 RKL_DPLL_CFGCR0(id));
3707 		hw_state->cfgcr1 = intel_de_read(i915,
3708 						 RKL_DPLL_CFGCR1(id));
3709 	} else if (DISPLAY_VER(i915) >= 12) {
3710 		hw_state->cfgcr0 = intel_de_read(i915,
3711 						 TGL_DPLL_CFGCR0(id));
3712 		hw_state->cfgcr1 = intel_de_read(i915,
3713 						 TGL_DPLL_CFGCR1(id));
3714 		if (i915->display.vbt.override_afc_startup) {
3715 			hw_state->div0 = intel_de_read(i915, TGL_DPLL0_DIV0(id));
3716 			hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK;
3717 		}
3718 	} else {
3719 		if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
3720 		    id == DPLL_ID_EHL_DPLL4) {
3721 			hw_state->cfgcr0 = intel_de_read(i915,
3722 							 ICL_DPLL_CFGCR0(4));
3723 			hw_state->cfgcr1 = intel_de_read(i915,
3724 							 ICL_DPLL_CFGCR1(4));
3725 		} else {
3726 			hw_state->cfgcr0 = intel_de_read(i915,
3727 							 ICL_DPLL_CFGCR0(id));
3728 			hw_state->cfgcr1 = intel_de_read(i915,
3729 							 ICL_DPLL_CFGCR1(id));
3730 		}
3731 	}
3732 
3733 	ret = true;
3734 out:
3735 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3736 	return ret;
3737 }
3738 
3739 static bool combo_pll_get_hw_state(struct drm_i915_private *i915,
3740 				   struct intel_shared_dpll *pll,
3741 				   struct intel_dpll_hw_state *dpll_hw_state)
3742 {
3743 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
3744 
3745 	return icl_pll_get_hw_state(i915, pll, dpll_hw_state, enable_reg);
3746 }
3747 
3748 static bool tbt_pll_get_hw_state(struct drm_i915_private *i915,
3749 				 struct intel_shared_dpll *pll,
3750 				 struct intel_dpll_hw_state *dpll_hw_state)
3751 {
3752 	return icl_pll_get_hw_state(i915, pll, dpll_hw_state, TBT_PLL_ENABLE);
3753 }
3754 
3755 static void icl_dpll_write(struct drm_i915_private *i915,
3756 			   struct intel_shared_dpll *pll,
3757 			   const struct icl_dpll_hw_state *hw_state)
3758 {
3759 	const enum intel_dpll_id id = pll->info->id;
3760 	i915_reg_t cfgcr0_reg, cfgcr1_reg, div0_reg = INVALID_MMIO_REG;
3761 
3762 	if (IS_ALDERLAKE_S(i915)) {
3763 		cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
3764 		cfgcr1_reg = ADLS_DPLL_CFGCR1(id);
3765 	} else if (IS_DG1(i915)) {
3766 		cfgcr0_reg = DG1_DPLL_CFGCR0(id);
3767 		cfgcr1_reg = DG1_DPLL_CFGCR1(id);
3768 	} else if (IS_ROCKETLAKE(i915)) {
3769 		cfgcr0_reg = RKL_DPLL_CFGCR0(id);
3770 		cfgcr1_reg = RKL_DPLL_CFGCR1(id);
3771 	} else if (DISPLAY_VER(i915) >= 12) {
3772 		cfgcr0_reg = TGL_DPLL_CFGCR0(id);
3773 		cfgcr1_reg = TGL_DPLL_CFGCR1(id);
3774 		div0_reg = TGL_DPLL0_DIV0(id);
3775 	} else {
3776 		if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
3777 		    id == DPLL_ID_EHL_DPLL4) {
3778 			cfgcr0_reg = ICL_DPLL_CFGCR0(4);
3779 			cfgcr1_reg = ICL_DPLL_CFGCR1(4);
3780 		} else {
3781 			cfgcr0_reg = ICL_DPLL_CFGCR0(id);
3782 			cfgcr1_reg = ICL_DPLL_CFGCR1(id);
3783 		}
3784 	}
3785 
3786 	intel_de_write(i915, cfgcr0_reg, hw_state->cfgcr0);
3787 	intel_de_write(i915, cfgcr1_reg, hw_state->cfgcr1);
3788 	drm_WARN_ON_ONCE(&i915->drm, i915->display.vbt.override_afc_startup &&
3789 			 !i915_mmio_reg_valid(div0_reg));
3790 	if (i915->display.vbt.override_afc_startup &&
3791 	    i915_mmio_reg_valid(div0_reg))
3792 		intel_de_rmw(i915, div0_reg,
3793 			     TGL_DPLL0_DIV0_AFC_STARTUP_MASK, hw_state->div0);
3794 	intel_de_posting_read(i915, cfgcr1_reg);
3795 }
3796 
3797 static void icl_mg_pll_write(struct drm_i915_private *i915,
3798 			     struct intel_shared_dpll *pll,
3799 			     const struct icl_dpll_hw_state *hw_state)
3800 {
3801 	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3802 
3803 	/*
3804 	 * Some of the following registers have reserved fields, so program
3805 	 * these with RMW based on a mask. The mask can be fixed or generated
3806 	 * during the calc/readout phase if the mask depends on some other HW
3807 	 * state like refclk, see icl_calc_mg_pll_state().
3808 	 */
3809 	intel_de_rmw(i915, MG_REFCLKIN_CTL(tc_port),
3810 		     MG_REFCLKIN_CTL_OD_2_MUX_MASK, hw_state->mg_refclkin_ctl);
3811 
3812 	intel_de_rmw(i915, MG_CLKTOP2_CORECLKCTL1(tc_port),
3813 		     MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK,
3814 		     hw_state->mg_clktop2_coreclkctl1);
3815 
3816 	intel_de_rmw(i915, MG_CLKTOP2_HSCLKCTL(tc_port),
3817 		     MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3818 		     MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3819 		     MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3820 		     MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK,
3821 		     hw_state->mg_clktop2_hsclkctl);
3822 
3823 	intel_de_write(i915, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
3824 	intel_de_write(i915, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
3825 	intel_de_write(i915, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
3826 	intel_de_write(i915, MG_PLL_FRAC_LOCK(tc_port),
3827 		       hw_state->mg_pll_frac_lock);
3828 	intel_de_write(i915, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
3829 
3830 	intel_de_rmw(i915, MG_PLL_BIAS(tc_port),
3831 		     hw_state->mg_pll_bias_mask, hw_state->mg_pll_bias);
3832 
3833 	intel_de_rmw(i915, MG_PLL_TDC_COLDST_BIAS(tc_port),
3834 		     hw_state->mg_pll_tdc_coldst_bias_mask,
3835 		     hw_state->mg_pll_tdc_coldst_bias);
3836 
3837 	intel_de_posting_read(i915, MG_PLL_TDC_COLDST_BIAS(tc_port));
3838 }
3839 
3840 static void dkl_pll_write(struct drm_i915_private *i915,
3841 			  struct intel_shared_dpll *pll,
3842 			  const struct icl_dpll_hw_state *hw_state)
3843 {
3844 	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3845 	u32 val;
3846 
3847 	/*
3848 	 * All registers programmed here have the same HIP_INDEX_REG even
3849 	 * though they are on different building blocks.
3850 	 */
3851 	/* All the registers are RMW */
3852 	val = intel_dkl_phy_read(i915, DKL_REFCLKIN_CTL(tc_port));
3853 	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3854 	val |= hw_state->mg_refclkin_ctl;
3855 	intel_dkl_phy_write(i915, DKL_REFCLKIN_CTL(tc_port), val);
3856 
3857 	val = intel_dkl_phy_read(i915, DKL_CLKTOP2_CORECLKCTL1(tc_port));
3858 	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3859 	val |= hw_state->mg_clktop2_coreclkctl1;
3860 	intel_dkl_phy_write(i915, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);
3861 
3862 	val = intel_dkl_phy_read(i915, DKL_CLKTOP2_HSCLKCTL(tc_port));
3863 	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3864 		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3865 		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3866 		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
3867 	val |= hw_state->mg_clktop2_hsclkctl;
3868 	intel_dkl_phy_write(i915, DKL_CLKTOP2_HSCLKCTL(tc_port), val);
3869 
3870 	val = DKL_PLL_DIV0_MASK;
3871 	if (i915->display.vbt.override_afc_startup)
3872 		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
3873 	intel_dkl_phy_rmw(i915, DKL_PLL_DIV0(tc_port), val,
3874 			  hw_state->mg_pll_div0);
3875 
3876 	val = intel_dkl_phy_read(i915, DKL_PLL_DIV1(tc_port));
3877 	val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
3878 		 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
3879 	val |= hw_state->mg_pll_div1;
3880 	intel_dkl_phy_write(i915, DKL_PLL_DIV1(tc_port), val);
3881 
3882 	val = intel_dkl_phy_read(i915, DKL_PLL_SSC(tc_port));
3883 	val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
3884 		 DKL_PLL_SSC_STEP_LEN_MASK |
3885 		 DKL_PLL_SSC_STEP_NUM_MASK |
3886 		 DKL_PLL_SSC_EN);
3887 	val |= hw_state->mg_pll_ssc;
3888 	intel_dkl_phy_write(i915, DKL_PLL_SSC(tc_port), val);
3889 
3890 	val = intel_dkl_phy_read(i915, DKL_PLL_BIAS(tc_port));
3891 	val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
3892 		 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
3893 	val |= hw_state->mg_pll_bias;
3894 	intel_dkl_phy_write(i915, DKL_PLL_BIAS(tc_port), val);
3895 
3896 	val = intel_dkl_phy_read(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3897 	val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
3898 		 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
3899 	val |= hw_state->mg_pll_tdc_coldst_bias;
3900 	intel_dkl_phy_write(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);
3901 
3902 	intel_dkl_phy_posting_read(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3903 }
3904 
3905 static void icl_pll_power_enable(struct drm_i915_private *i915,
3906 				 struct intel_shared_dpll *pll,
3907 				 i915_reg_t enable_reg)
3908 {
3909 	intel_de_rmw(i915, enable_reg, 0, PLL_POWER_ENABLE);
3910 
3911 	/*
3912 	 * The spec says we need to "wait" but it also says it should be
3913 	 * immediate.
3914 	 */
3915 	if (intel_de_wait_for_set(i915, enable_reg, PLL_POWER_STATE, 1))
3916 		drm_err(&i915->drm, "PLL %d Power not enabled\n",
3917 			pll->info->id);
3918 }
3919 
3920 static void icl_pll_enable(struct drm_i915_private *i915,
3921 			   struct intel_shared_dpll *pll,
3922 			   i915_reg_t enable_reg)
3923 {
3924 	intel_de_rmw(i915, enable_reg, 0, PLL_ENABLE);
3925 
3926 	/* Timeout is actually 600us. */
3927 	if (intel_de_wait_for_set(i915, enable_reg, PLL_LOCK, 1))
3928 		drm_err(&i915->drm, "PLL %d not locked\n", pll->info->id);
3929 }
3930 
3931 static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct intel_shared_dpll *pll)
3932 {
3933 	u32 val;
3934 
3935 	if (!(IS_ALDERLAKE_P(i915) && IS_DISPLAY_STEP(i915, STEP_A0, STEP_B0)) ||
3936 	    pll->info->id != DPLL_ID_ICL_DPLL0)
3937 		return;
3938 	/*
3939 	 * Wa_16011069516:adl-p[a0]
3940 	 *
3941 	 * All CMTG regs are unreliable until CMTG clock gating is disabled,
3942 	 * so we can only assume the default TRANS_CMTG_CHICKEN reg value and
3943 	 * sanity check this assumption with a double read, which presumably
3944 	 * returns the correct value even with clock gating on.
3945 	 *
3946 	 * Instead of the usual place for workarounds we apply this one here,
3947 	 * since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled.
3948 	 */
3949 	val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
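	/*
	 * The rmw below provides the second read of the pair; its return
	 * value (the pre-write register contents) is what feeds the sanity
	 * check against the assumed default.
	 */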
3950 	val = intel_de_rmw(i915, TRANS_CMTG_CHICKEN, ~0, DISABLE_DPT_CLK_GATING);
3951 	if (drm_WARN_ON(&i915->drm, val & ~DISABLE_DPT_CLK_GATING))
3952 		drm_dbg_kms(&i915->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val);
3953 }
3954 
3955 static void combo_pll_enable(struct drm_i915_private *i915,
3956 			     struct intel_shared_dpll *pll,
3957 			     const struct intel_dpll_hw_state *dpll_hw_state)
3958 {
3959 	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3960 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
3961 
3962 	icl_pll_power_enable(i915, pll, enable_reg);
3963 
3964 	icl_dpll_write(i915, pll, hw_state);
3965 
3966 	/*
3967 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3968 	 * paths should already be setting the appropriate voltage, hence we do
3969 	 * nothing here.
3970 	 */
3971 
3972 	icl_pll_enable(i915, pll, enable_reg);
3973 
3974 	adlp_cmtg_clock_gating_wa(i915, pll);
3975 
3976 	/* DVFS post sequence would be here. See the comment above. */
3977 }
3978 
3979 static void tbt_pll_enable(struct drm_i915_private *i915,
3980 			   struct intel_shared_dpll *pll,
3981 			   const struct intel_dpll_hw_state *dpll_hw_state)
3982 {
3983 	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3984 
3985 	icl_pll_power_enable(i915, pll, TBT_PLL_ENABLE);
3986 
3987 	icl_dpll_write(i915, pll, hw_state);
3988 
3989 	/*
3990 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3991 	 * paths should already be setting the appropriate voltage, hence we do
3992 	 * nothing here.
3993 	 */
3994 
3995 	icl_pll_enable(i915, pll, TBT_PLL_ENABLE);
3996 
3997 	/* DVFS post sequence would be here. See the comment above. */
3998 }
3999 
4000 static void mg_pll_enable(struct drm_i915_private *i915,
4001 			  struct intel_shared_dpll *pll,
4002 			  const struct intel_dpll_hw_state *dpll_hw_state)
4003 {
4004 	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
4005 	i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll);
4006 
4007 	icl_pll_power_enable(i915, pll, enable_reg);
4008 
4009 	if (DISPLAY_VER(i915) >= 12)
4010 		dkl_pll_write(i915, pll, hw_state);
4011 	else
4012 		icl_mg_pll_write(i915, pll, hw_state);
4013 
4014 	/*
4015 	 * DVFS pre sequence would be here, but in our driver the cdclk code
4016 	 * paths should already be setting the appropriate voltage, hence we do
4017 	 * nothing here.
4018 	 */
4019 
4020 	icl_pll_enable(i915, pll, enable_reg);
4021 
4022 	/* DVFS post sequence would be here. See the comment above. */
4023 }
4024 
4025 static void icl_pll_disable(struct drm_i915_private *i915,
4026 			    struct intel_shared_dpll *pll,
4027 			    i915_reg_t enable_reg)
4028 {
4029 	/* The first steps are done by intel_ddi_post_disable(). */
4030 
4031 	/*
4032 	 * DVFS pre sequence would be here, but in our driver the cdclk code
4033 	 * paths should already be setting the appropriate voltage, hence we do
4034 	 * nothing here.
4035 	 */
4036 
4037 	intel_de_rmw(i915, enable_reg, PLL_ENABLE, 0);
4038 
4039 	/* Timeout is actually 1us. */
4040 	if (intel_de_wait_for_clear(i915, enable_reg, PLL_LOCK, 1))
4041 		drm_err(&i915->drm, "PLL %d locked\n", pll->info->id);
4042 
4043 	/* DVFS post sequence would be here. See the comment above. */
4044 
4045 	intel_de_rmw(i915, enable_reg, PLL_POWER_ENABLE, 0);
4046 
4047 	/*
4048 	 * The spec says we need to "wait" but it also says it should be
4049 	 * immediate.
4050 	 */
4051 	if (intel_de_wait_for_clear(i915, enable_reg, PLL_POWER_STATE, 1))
4052 		drm_err(&i915->drm, "PLL %d Power not disabled\n",
4053 			pll->info->id);
4054 }
4055 
4056 static void combo_pll_disable(struct drm_i915_private *i915,
4057 			      struct intel_shared_dpll *pll)
4058 {
4059 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
4060 
4061 	icl_pll_disable(i915, pll, enable_reg);
4062 }
4063 
4064 static void tbt_pll_disable(struct drm_i915_private *i915,
4065 			    struct intel_shared_dpll *pll)
4066 {
4067 	icl_pll_disable(i915, pll, TBT_PLL_ENABLE);
4068 }
4069 
4070 static void mg_pll_disable(struct drm_i915_private *i915,
4071 			   struct intel_shared_dpll *pll)
4072 {
4073 	i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll);
4074 
4075 	icl_pll_disable(i915, pll, enable_reg);
4076 }
4077 
4078 static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
4079 {
4080 	/* No SSC ref */
4081 	i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
4082 }
4083 
4084 static void icl_dump_hw_state(struct drm_printer *p,
4085 			      const struct intel_dpll_hw_state *dpll_hw_state)
4086 {
4087 	const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
4088 
4089 	drm_printf(p, "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, "
4090 		   "mg_refclkin_ctl: 0x%x, mg_clktop2_coreclkctl1: 0x%x, "
4091 		   "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
4092 		   "mg_pll_div1: 0x%x, mg_pll_lf: 0x%x, "
4093 		   "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
4094 		   "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
4095 		   hw_state->cfgcr0, hw_state->cfgcr1, hw_state->div0,
4096 		   hw_state->mg_refclkin_ctl,
4097 		   hw_state->mg_clktop2_coreclkctl1,
4098 		   hw_state->mg_clktop2_hsclkctl,
4099 		   hw_state->mg_pll_div0,
4100 		   hw_state->mg_pll_div1,
4101 		   hw_state->mg_pll_lf,
4102 		   hw_state->mg_pll_frac_lock,
4103 		   hw_state->mg_pll_ssc,
4104 		   hw_state->mg_pll_bias,
4105 		   hw_state->mg_pll_tdc_coldst_bias);
4106 }
4107 
4108 static bool icl_compare_hw_state(const struct intel_dpll_hw_state *_a,
4109 				 const struct intel_dpll_hw_state *_b)
4110 {
4111 	const struct icl_dpll_hw_state *a = &_a->icl;
4112 	const struct icl_dpll_hw_state *b = &_b->icl;
4113 
4114 	/* FIXME split combo vs. mg more thoroughly */
4115 	return a->cfgcr0 == b->cfgcr0 &&
4116 		a->cfgcr1 == b->cfgcr1 &&
4117 		a->div0 == b->div0 &&
4118 		a->mg_refclkin_ctl == b->mg_refclkin_ctl &&
4119 		a->mg_clktop2_coreclkctl1 == b->mg_clktop2_coreclkctl1 &&
4120 		a->mg_clktop2_hsclkctl == b->mg_clktop2_hsclkctl &&
4121 		a->mg_pll_div0 == b->mg_pll_div0 &&
4122 		a->mg_pll_div1 == b->mg_pll_div1 &&
4123 		a->mg_pll_lf == b->mg_pll_lf &&
4124 		a->mg_pll_frac_lock == b->mg_pll_frac_lock &&
4125 		a->mg_pll_ssc == b->mg_pll_ssc &&
4126 		a->mg_pll_bias == b->mg_pll_bias &&
4127 		a->mg_pll_tdc_coldst_bias == b->mg_pll_tdc_coldst_bias;
4128 }
4129 
4130 static const struct intel_shared_dpll_funcs combo_pll_funcs = {
4131 	.enable = combo_pll_enable,
4132 	.disable = combo_pll_disable,
4133 	.get_hw_state = combo_pll_get_hw_state,
4134 	.get_freq = icl_ddi_combo_pll_get_freq,
4135 };
4136 
4137 static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
4138 	.enable = tbt_pll_enable,
4139 	.disable = tbt_pll_disable,
4140 	.get_hw_state = tbt_pll_get_hw_state,
4141 	.get_freq = icl_ddi_tbt_pll_get_freq,
4142 };
4143 
4144 static const struct intel_shared_dpll_funcs mg_pll_funcs = {
4145 	.enable = mg_pll_enable,
4146 	.disable = mg_pll_disable,
4147 	.get_hw_state = mg_pll_get_hw_state,
4148 	.get_freq = icl_ddi_mg_pll_get_freq,
4149 };
4150 
4151 static const struct dpll_info icl_plls[] = {
4152 	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4153 	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4154 	{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
4155 	  .is_alt_port_dpll = true, },
4156 	{ .name = "MG PLL 1", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
4157 	{ .name = "MG PLL 2", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
4158 	{ .name = "MG PLL 3", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
4159 	{ .name = "MG PLL 4", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
4160 	{}
4161 };
4162 
4163 static const struct intel_dpll_mgr icl_pll_mgr = {
4164 	.dpll_info = icl_plls,
4165 	.compute_dplls = icl_compute_dplls,
4166 	.get_dplls = icl_get_dplls,
4167 	.put_dplls = icl_put_dplls,
4168 	.update_active_dpll = icl_update_active_dpll,
4169 	.update_ref_clks = icl_update_dpll_ref_clks,
4170 	.dump_hw_state = icl_dump_hw_state,
4171 	.compare_hw_state = icl_compare_hw_state,
4172 };
4173 
4174 static const struct dpll_info ehl_plls[] = {
4175 	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4176 	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4177 	{ .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4,
4178 	  .power_domain = POWER_DOMAIN_DC_OFF, },
4179 	{}
4180 };
4181 
4182 static const struct intel_dpll_mgr ehl_pll_mgr = {
4183 	.dpll_info = ehl_plls,
4184 	.compute_dplls = icl_compute_dplls,
4185 	.get_dplls = icl_get_dplls,
4186 	.put_dplls = icl_put_dplls,
4187 	.update_ref_clks = icl_update_dpll_ref_clks,
4188 	.dump_hw_state = icl_dump_hw_state,
4189 	.compare_hw_state = icl_compare_hw_state,
4190 };
4191 
4192 static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
4193 	.enable = mg_pll_enable,
4194 	.disable = mg_pll_disable,
4195 	.get_hw_state = dkl_pll_get_hw_state,
4196 	.get_freq = icl_ddi_mg_pll_get_freq,
4197 };
4198 
4199 static const struct dpll_info tgl_plls[] = {
4200 	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4201 	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4202 	{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
4203 	  .is_alt_port_dpll = true, },
4204 	{ .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
4205 	{ .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
4206 	{ .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
4207 	{ .name = "TC PLL 4", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
4208 	{ .name = "TC PLL 5", .funcs = &dkl_pll_funcs, .id = DPLL_ID_TGL_MGPLL5, },
4209 	{ .name = "TC PLL 6", .funcs = &dkl_pll_funcs, .id = DPLL_ID_TGL_MGPLL6, },
4210 	{}
4211 };
4212 
4213 static const struct intel_dpll_mgr tgl_pll_mgr = {
4214 	.dpll_info = tgl_plls,
4215 	.compute_dplls = icl_compute_dplls,
4216 	.get_dplls = icl_get_dplls,
4217 	.put_dplls = icl_put_dplls,
4218 	.update_active_dpll = icl_update_active_dpll,
4219 	.update_ref_clks = icl_update_dpll_ref_clks,
4220 	.dump_hw_state = icl_dump_hw_state,
4221 	.compare_hw_state = icl_compare_hw_state,
4222 };
4223 
4224 static const struct dpll_info rkl_plls[] = {
4225 	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4226 	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4227 	{ .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4, },
4228 	{}
4229 };
4230 
4231 static const struct intel_dpll_mgr rkl_pll_mgr = {
4232 	.dpll_info = rkl_plls,
4233 	.compute_dplls = icl_compute_dplls,
4234 	.get_dplls = icl_get_dplls,
4235 	.put_dplls = icl_put_dplls,
4236 	.update_ref_clks = icl_update_dpll_ref_clks,
4237 	.dump_hw_state = icl_dump_hw_state,
4238 	.compare_hw_state = icl_compare_hw_state,
4239 };
4240 
4241 static const struct dpll_info dg1_plls[] = {
4242 	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL0, },
4243 	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL1, },
4244 	{ .name = "DPLL 2", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL2, },
4245 	{ .name = "DPLL 3", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL3, },
4246 	{}
4247 };
4248 
4249 static const struct intel_dpll_mgr dg1_pll_mgr = {
4250 	.dpll_info = dg1_plls,
4251 	.compute_dplls = icl_compute_dplls,
4252 	.get_dplls = icl_get_dplls,
4253 	.put_dplls = icl_put_dplls,
4254 	.update_ref_clks = icl_update_dpll_ref_clks,
4255 	.dump_hw_state = icl_dump_hw_state,
4256 	.compare_hw_state = icl_compare_hw_state,
4257 };
4258 
4259 static const struct dpll_info adls_plls[] = {
4260 	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4261 	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4262 	{ .name = "DPLL 2", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL2, },
4263 	{ .name = "DPLL 3", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL3, },
4264 	{}
4265 };
4266 
4267 static const struct intel_dpll_mgr adls_pll_mgr = {
4268 	.dpll_info = adls_plls,
4269 	.compute_dplls = icl_compute_dplls,
4270 	.get_dplls = icl_get_dplls,
4271 	.put_dplls = icl_put_dplls,
4272 	.update_ref_clks = icl_update_dpll_ref_clks,
4273 	.dump_hw_state = icl_dump_hw_state,
4274 	.compare_hw_state = icl_compare_hw_state,
4275 };
4276 
4277 static const struct dpll_info adlp_plls[] = {
4278 	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4279 	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4280 	{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
4281 	  .is_alt_port_dpll = true, },
4282 	{ .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
4283 	{ .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
4284 	{ .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
4285 	{ .name = "TC PLL 4", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
4286 	{}
4287 };
4288 
4289 static const struct intel_dpll_mgr adlp_pll_mgr = {
4290 	.dpll_info = adlp_plls,
4291 	.compute_dplls = icl_compute_dplls,
4292 	.get_dplls = icl_get_dplls,
4293 	.put_dplls = icl_put_dplls,
4294 	.update_active_dpll = icl_update_active_dpll,
4295 	.update_ref_clks = icl_update_dpll_ref_clks,
4296 	.dump_hw_state = icl_dump_hw_state,
4297 	.compare_hw_state = icl_compare_hw_state,
4298 };
4299 
4300 /**
4301  * intel_shared_dpll_init - Initialize shared DPLLs
4302  * @i915: i915 device
4303  *
4304  * Initialize shared DPLLs for @i915.
4305  */
4306 void intel_shared_dpll_init(struct drm_i915_private *i915)
4307 {
4308 	const struct intel_dpll_mgr *dpll_mgr = NULL;
4309 	const struct dpll_info *dpll_info;
4310 	int i;
4311 
4312 	mutex_init(&i915->display.dpll.lock);
4313 
4314 	if (DISPLAY_VER(i915) >= 14 || IS_DG2(i915))
4315 		/* No shared DPLLs on DG2 or display ver >= 14; port PLLs are part of the PHY */
4316 		dpll_mgr = NULL;
4317 	else if (IS_ALDERLAKE_P(i915))
4318 		dpll_mgr = &adlp_pll_mgr;
4319 	else if (IS_ALDERLAKE_S(i915))
4320 		dpll_mgr = &adls_pll_mgr;
4321 	else if (IS_DG1(i915))
4322 		dpll_mgr = &dg1_pll_mgr;
4323 	else if (IS_ROCKETLAKE(i915))
4324 		dpll_mgr = &rkl_pll_mgr;
4325 	else if (DISPLAY_VER(i915) >= 12)
4326 		dpll_mgr = &tgl_pll_mgr;
4327 	else if (IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915))
4328 		dpll_mgr = &ehl_pll_mgr;
4329 	else if (DISPLAY_VER(i915) >= 11)
4330 		dpll_mgr = &icl_pll_mgr;
4331 	else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
4332 		dpll_mgr = &bxt_pll_mgr;
4333 	else if (DISPLAY_VER(i915) == 9)
4334 		dpll_mgr = &skl_pll_mgr;
4335 	else if (HAS_DDI(i915))
4336 		dpll_mgr = &hsw_pll_mgr;
4337 	else if (HAS_PCH_IBX(i915) || HAS_PCH_CPT(i915))
4338 		dpll_mgr = &pch_pll_mgr;
4339 
4340 	if (!dpll_mgr)
4341 		return;
4342 
4343 	dpll_info = dpll_mgr->dpll_info;
4344 
4345 	for (i = 0; dpll_info[i].name; i++) {
4346 		if (drm_WARN_ON(&i915->drm,
4347 				i >= ARRAY_SIZE(i915->display.dpll.shared_dplls)))
4348 			break;
4349 
4350 		/* must fit into unsigned long bitmask on 32bit */
4351 		if (drm_WARN_ON(&i915->drm, dpll_info[i].id >= 32))
4352 			break;
4353 
4354 		i915->display.dpll.shared_dplls[i].info = &dpll_info[i];
4355 		i915->display.dpll.shared_dplls[i].index = i;
4356 	}
4357 
4358 	i915->display.dpll.mgr = dpll_mgr;
4359 	i915->display.dpll.num_shared_dpll = i;
4360 }
4361 
4362 /**
4363  * intel_compute_shared_dplls - compute DPLL state for a CRTC and encoder combination
4364  * @state: atomic state
4365  * @crtc: CRTC to compute DPLLs for
4366  * @encoder: encoder
4367  *
4368  * This function computes the DPLL state for the given CRTC and encoder.
4369  *
4370  * The new configuration in the atomic commit @state is made effective by
4371  * calling intel_shared_dpll_swap_state().
4372  *
4373  * Returns:
4374  * 0 on success, negative error code on failure.
4375  */
4376 int intel_compute_shared_dplls(struct intel_atomic_state *state,
4377 			       struct intel_crtc *crtc,
4378 			       struct intel_encoder *encoder)
4379 {
4380 	struct drm_i915_private *i915 = to_i915(state->base.dev);
4381 	const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4382 
4383 	if (drm_WARN_ON(&i915->drm, !dpll_mgr))
4384 		return -EINVAL;
4385 
4386 	return dpll_mgr->compute_dplls(state, crtc, encoder);
4387 }
4388 
4389 /**
4390  * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
4391  * @state: atomic state
4392  * @crtc: CRTC to reserve DPLLs for
4393  * @encoder: encoder
4394  *
4395  * This function reserves all required DPLLs for the given CRTC and encoder
4396  * combination in the current atomic commit @state and the new @crtc atomic
4397  * state.
4398  *
4399  * The new configuration in the atomic commit @state is made effective by
4400  * calling intel_shared_dpll_swap_state().
4401  *
4402  * The reserved DPLLs should be released by calling
4403  * intel_release_shared_dplls().
4404  *
4405  * Returns:
4406  * 0 if all required DPLLs were successfully reserved,
4407  * negative error code otherwise.
4408  */
4409 int intel_reserve_shared_dplls(struct intel_atomic_state *state,
4410 			       struct intel_crtc *crtc,
4411 			       struct intel_encoder *encoder)
4412 {
4413 	struct drm_i915_private *i915 = to_i915(state->base.dev);
4414 	const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4415 
4416 	if (drm_WARN_ON(&i915->drm, !dpll_mgr))
4417 		return -EINVAL;
4418 
4419 	return dpll_mgr->get_dplls(state, crtc, encoder);
4420 }
4421 
4422 /**
4423  * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
4424  * @state: atomic state
4425  * @crtc: crtc from which the DPLLs are to be released
4426  *
4427  * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
4428  * from the current atomic commit @state and the old @crtc atomic state.
4429  *
4430  * The new configuration in the atomic commit @state is made effective by
4431  * calling intel_shared_dpll_swap_state().
4432  */
4433 void intel_release_shared_dplls(struct intel_atomic_state *state,
4434 				struct intel_crtc *crtc)
4435 {
4436 	struct drm_i915_private *i915 = to_i915(state->base.dev);
4437 	const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4438 
4439 	/*
4440 	 * FIXME: this function is called for every platform having a
4441 	 * compute_clock hook, even on platforms that don't yet support the
4442 	 * shared DPLL framework and on which intel_reserve_shared_dplls()
4443 	 * is never called.
4444 	 */
4445 	if (!dpll_mgr)
4446 		return;
4447 
4448 	dpll_mgr->put_dplls(state, crtc);
4449 }
4450 
4451 /**
4452  * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
4453  * @state: atomic state
4454  * @crtc: the CRTC for which to update the active DPLL
4455  * @encoder: encoder determining the type of port DPLL
4456  *
4457  * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
4458  * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
4459  * DPLL selected will be based on the current mode of the encoder's port.
4460  */
4461 void intel_update_active_dpll(struct intel_atomic_state *state,
4462 			      struct intel_crtc *crtc,
4463 			      struct intel_encoder *encoder)
4464 {
4465 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
4466 	const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4467 
4468 	if (drm_WARN_ON(&i915->drm, !dpll_mgr))
4469 		return;
4470 
4471 	dpll_mgr->update_active_dpll(state, crtc, encoder);
4472 }
4473 
4474 /**
4475  * intel_dpll_get_freq - calculate the DPLL's output frequency
4476  * @i915: i915 device
4477  * @pll: DPLL for which to calculate the output frequency
4478  * @dpll_hw_state: DPLL state from which to calculate the output frequency
4479  *
4480  * Return the output frequency corresponding to @pll's passed in @dpll_hw_state.
4481  */
4482 int intel_dpll_get_freq(struct drm_i915_private *i915,
4483 			const struct intel_shared_dpll *pll,
4484 			const struct intel_dpll_hw_state *dpll_hw_state)
4485 {
4486 	if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
4487 		return 0;
4488 
4489 	return pll->info->funcs->get_freq(i915, pll, dpll_hw_state);
4490 }
4491 
4492 /**
4493  * intel_dpll_get_hw_state - readout the DPLL's hardware state
4494  * @i915: i915 device
4495  * @pll: DPLL for which to read out the hardware state
4496  * @dpll_hw_state: DPLL's hardware state
4497  *
4498  * Read out @pll's hardware state into @dpll_hw_state.
4499  */
4500 bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
4501 			     struct intel_shared_dpll *pll,
4502 			     struct intel_dpll_hw_state *dpll_hw_state)
4503 {
4504 	return pll->info->funcs->get_hw_state(i915, pll, dpll_hw_state);
4505 }
4506 
4507 static void readout_dpll_hw_state(struct drm_i915_private *i915,
4508 				  struct intel_shared_dpll *pll)
4509 {
4510 	struct intel_crtc *crtc;
4511 
4512 	pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state);
4513 
4514 	if (pll->on && pll->info->power_domain)
4515 		pll->wakeref = intel_display_power_get(i915, pll->info->power_domain);
4516 
4517 	pll->state.pipe_mask = 0;
4518 	for_each_intel_crtc(&i915->drm, crtc) {
4519 		struct intel_crtc_state *crtc_state =
4520 			to_intel_crtc_state(crtc->base.state);
4521 
4522 		if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
4523 			intel_reference_shared_dpll_crtc(crtc, pll, &pll->state);
4524 	}
4525 	pll->active_mask = pll->state.pipe_mask;
4526 
4527 	drm_dbg_kms(&i915->drm,
4528 		    "%s hw state readout: pipe_mask 0x%x, on %i\n",
4529 		    pll->info->name, pll->state.pipe_mask, pll->on);
4530 }
4531 
4532 void intel_dpll_update_ref_clks(struct drm_i915_private *i915)
4533 {
4534 	if (i915->display.dpll.mgr && i915->display.dpll.mgr->update_ref_clks)
4535 		i915->display.dpll.mgr->update_ref_clks(i915);
4536 }
4537 
4538 void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
4539 {
4540 	struct intel_shared_dpll *pll;
4541 	int i;
4542 
4543 	for_each_shared_dpll(i915, pll, i)
4544 		readout_dpll_hw_state(i915, pll);
4545 }
4546 
4547 static void sanitize_dpll_state(struct drm_i915_private *i915,
4548 				struct intel_shared_dpll *pll)
4549 {
4550 	if (!pll->on)
4551 		return;
4552 
4553 	adlp_cmtg_clock_gating_wa(i915, pll);
4554 
4555 	if (pll->active_mask)
4556 		return;
4557 
4558 	drm_dbg_kms(&i915->drm,
4559 		    "%s enabled but not in use, disabling\n",
4560 		    pll->info->name);
4561 
4562 	_intel_disable_shared_dpll(i915, pll);
4563 }
4564 
4565 void intel_dpll_sanitize_state(struct drm_i915_private *i915)
4566 {
4567 	struct intel_shared_dpll *pll;
4568 	int i;
4569 
4570 	for_each_shared_dpll(i915, pll, i)
4571 		sanitize_dpll_state(i915, pll);
4572 }
4573 
4574 /**
4575  * intel_dpll_dump_hw_state - dump hw_state
4576  * @i915: i915 drm device
4577  * @p: where to print the state to
4578  * @dpll_hw_state: hw state to be dumped
4579  *
4580  * Dump out the relevant values in @dpll_hw_state.
4581  */
4582 void intel_dpll_dump_hw_state(struct drm_i915_private *i915,
4583 			      struct drm_printer *p,
4584 			      const struct intel_dpll_hw_state *dpll_hw_state)
4585 {
4586 	if (i915->display.dpll.mgr) {
4587 		i915->display.dpll.mgr->dump_hw_state(p, dpll_hw_state);
4588 	} else {
4589 		/* fallback for platforms that don't use the shared dpll
4590 		 * infrastructure
4591 		 */
4592 		ibx_dump_hw_state(p, dpll_hw_state);
4593 	}
4594 }
4595 
4596 /**
4597  * intel_dpll_compare_hw_state - compare the two states
4598  * @i915: i915 drm device
4599  * @a: first DPLL hw state
4600  * @b: second DPLL hw state
4601  *
4602  * Compare DPLL hw states @a and @b.
4603  *
4604  * Returns: true if the states are equal, false if they differ
4605  */
4606 bool intel_dpll_compare_hw_state(struct drm_i915_private *i915,
4607 				 const struct intel_dpll_hw_state *a,
4608 				 const struct intel_dpll_hw_state *b)
4609 {
4610 	if (i915->display.dpll.mgr) {
4611 		return i915->display.dpll.mgr->compare_hw_state(a, b);
4612 	} else {
4613 		/* fallback for platforms that don't use the shared dpll
4614 		 * infrastructure
4615 		 */
4616 		return ibx_compare_hw_state(a, b);
4617 	}
4618 }
4619 
4620 static void
4621 verify_single_dpll_state(struct drm_i915_private *i915,
4622 			 struct intel_shared_dpll *pll,
4623 			 struct intel_crtc *crtc,
4624 			 const struct intel_crtc_state *new_crtc_state)
4625 {
4626 	struct intel_display *display = &i915->display;
4627 	struct intel_dpll_hw_state dpll_hw_state = {};
4628 	u8 pipe_mask;
4629 	bool active;
4630 
4631 	active = intel_dpll_get_hw_state(i915, pll, &dpll_hw_state);
4632 
4633 	if (!pll->info->always_on) {
4634 		INTEL_DISPLAY_STATE_WARN(display, !pll->on && pll->active_mask,
4635 					 "%s: pll in active use but not on in sw tracking\n",
4636 					 pll->info->name);
4637 		INTEL_DISPLAY_STATE_WARN(display, pll->on && !pll->active_mask,
4638 					 "%s: pll is on but not used by any active pipe\n",
4639 					 pll->info->name);
4640 		INTEL_DISPLAY_STATE_WARN(display, pll->on != active,
4641 					 "%s: pll on state mismatch (expected %i, found %i)\n",
4642 					 pll->info->name, pll->on, active);
4643 	}
4644 
4645 	if (!crtc) {
4646 		INTEL_DISPLAY_STATE_WARN(display,
4647 					 pll->active_mask & ~pll->state.pipe_mask,
4648 					 "%s: more active pll users than references: 0x%x vs 0x%x\n",
4649 					 pll->info->name, pll->active_mask, pll->state.pipe_mask);
4650 
4651 		return;
4652 	}
4653 
4654 	pipe_mask = BIT(crtc->pipe);
4655 
4656 	if (new_crtc_state->hw.active)
4657 		INTEL_DISPLAY_STATE_WARN(display, !(pll->active_mask & pipe_mask),
4658 					 "%s: pll active mismatch (expected pipe %c in active mask 0x%x)\n",
4659 					 pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
4660 	else
4661 		INTEL_DISPLAY_STATE_WARN(display, pll->active_mask & pipe_mask,
4662 					 "%s: pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
4663 					 pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
4664 
4665 	INTEL_DISPLAY_STATE_WARN(display, !(pll->state.pipe_mask & pipe_mask),
4666 				 "%s: pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
4667 				 pll->info->name, pipe_mask, pll->state.pipe_mask);
4668 
4669 	INTEL_DISPLAY_STATE_WARN(display,
4670 				 pll->on && memcmp(&pll->state.hw_state, &dpll_hw_state,
4671 						   sizeof(dpll_hw_state)),
4672 				 "%s: pll hw state mismatch\n",
4673 				 pll->info->name);
4674 }
4675 
static bool has_alt_port_dpll(const struct intel_shared_dpll *old_pll,
			      const struct intel_shared_dpll *new_pll)
{
	return old_pll && new_pll && old_pll != new_pll &&
		(old_pll->info->is_alt_port_dpll || new_pll->info->is_alt_port_dpll);
}

void intel_shared_dpll_state_verify(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (new_crtc_state->shared_dpll)
		verify_single_dpll_state(i915, new_crtc_state->shared_dpll,
					 crtc, new_crtc_state);

	if (old_crtc_state->shared_dpll &&
	    old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
		u8 pipe_mask = BIT(crtc->pipe);
		struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;

		INTEL_DISPLAY_STATE_WARN(display, pll->active_mask & pipe_mask,
					 "%s: pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
					 pll->info->name, pipe_name(crtc->pipe), pll->active_mask);

		/* TC ports have both MG/TC and TBT PLL referenced simultaneously */
		INTEL_DISPLAY_STATE_WARN(display, !has_alt_port_dpll(old_crtc_state->shared_dpll,
								     new_crtc_state->shared_dpll) &&
					 pll->state.pipe_mask & pipe_mask,
					 "%s: pll enabled crtcs mismatch (found pipe %c in enabled mask (0x%x))\n",
					 pll->info->name, pipe_name(crtc->pipe), pll->state.pipe_mask);
	}
}

void intel_shared_dpll_verify_disabled(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_shared_dpll *pll;
	int i;

	for_each_shared_dpll(i915, pll, i)
		verify_single_dpll_state(i915, pll, NULL, NULL);
}