xref: /linux/drivers/gpu/drm/i915/display/intel_fdi.c (revision 2c1ed907520c50326b8f604907a8478b27881a2e)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2020 Intel Corporation
4  */
5 
6 #include <linux/string_helpers.h>
7 
8 #include <drm/drm_fixed.h>
9 
10 #include "i915_drv.h"
11 #include "i915_reg.h"
12 #include "intel_atomic.h"
13 #include "intel_crtc.h"
14 #include "intel_ddi.h"
15 #include "intel_de.h"
16 #include "intel_dp.h"
17 #include "intel_display_types.h"
18 #include "intel_fdi.h"
19 #include "intel_fdi_regs.h"
20 #include "intel_link_bw.h"
21 
/* Per-platform FDI vtable; the hook is invoked via intel_fdi_link_train(). */
struct intel_fdi_funcs {
	/* Train the FDI link for @crtc using the config in @crtc_state. */
	void (*fdi_link_train)(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state);
};
26 
/*
 * Assert that the FDI TX side of @pipe is in the expected on/off @state,
 * warning if it is not.
 */
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	struct intel_display *display = &dev_priv->display;
	bool enabled;

	if (!HAS_DDI(display)) {
		enabled = intel_de_read(display, FDI_TX_CTL(pipe)) & FDI_TX_ENABLE;
	} else {
		/*
		 * DDI does not have a specific FDI_TX register, so check the
		 * transcoder DDI function enable instead.
		 *
		 * FDI is never fed from EDP transcoder
		 * so pipe->transcoder cast is fine here.
		 */
		enum transcoder cpu_transcoder = (enum transcoder)pipe;

		enabled = intel_de_read(display,
					TRANS_DDI_FUNC_CTL(display, cpu_transcoder)) & TRANS_DDI_FUNC_ENABLE;
	}

	INTEL_DISPLAY_STATE_WARN(display, enabled != state,
				 "FDI TX state assertion failure (expected %s, current %s)\n",
				 str_on_off(state), str_on_off(enabled));
}
50 
/* Assert that the FDI TX side of @pipe is enabled. */
void assert_fdi_tx_enabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_tx(i915, pipe, true);
}
55 
/* Assert that the FDI TX side of @pipe is disabled. */
void assert_fdi_tx_disabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_tx(i915, pipe, false);
}
60 
/*
 * Assert that the PCH FDI RX side of @pipe is in the expected on/off
 * @state, warning if it is not.
 */
static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	struct intel_display *display = &dev_priv->display;
	bool enabled =
		intel_de_read(display, FDI_RX_CTL(pipe)) & FDI_RX_ENABLE;

	INTEL_DISPLAY_STATE_WARN(display, enabled != state,
				 "FDI RX state assertion failure (expected %s, current %s)\n",
				 str_on_off(state), str_on_off(enabled));
}
72 
/* Assert that the PCH FDI RX side of @pipe is enabled. */
void assert_fdi_rx_enabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_rx(i915, pipe, true);
}
77 
/* Assert that the PCH FDI RX side of @pipe is disabled. */
void assert_fdi_rx_disabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_rx(i915, pipe, false);
}
82 
assert_fdi_tx_pll_enabled(struct drm_i915_private * i915,enum pipe pipe)83 void assert_fdi_tx_pll_enabled(struct drm_i915_private *i915,
84 			       enum pipe pipe)
85 {
86 	struct intel_display *display = &i915->display;
87 	bool cur_state;
88 
89 	/* ILK FDI PLL is always enabled */
90 	if (IS_IRONLAKE(i915))
91 		return;
92 
93 	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
94 	if (HAS_DDI(display))
95 		return;
96 
97 	cur_state = intel_de_read(display, FDI_TX_CTL(pipe)) & FDI_TX_PLL_ENABLE;
98 	INTEL_DISPLAY_STATE_WARN(display, !cur_state,
99 				 "FDI TX PLL assertion failure, should be active but is disabled\n");
100 }
101 
/*
 * Assert that the PCH FDI RX PLL for @pipe is in the expected on/off
 * @state, warning if it is not.
 */
static void assert_fdi_rx_pll(struct drm_i915_private *i915,
			      enum pipe pipe, bool state)
{
	struct intel_display *display = &i915->display;
	bool pll_on =
		intel_de_read(display, FDI_RX_CTL(pipe)) & FDI_RX_PLL_ENABLE;

	INTEL_DISPLAY_STATE_WARN(display, pll_on != state,
				 "FDI RX PLL assertion failure (expected %s, current %s)\n",
				 str_on_off(state), str_on_off(pll_on));
}
113 
/* Assert that the PCH FDI RX PLL for @pipe is enabled. */
void assert_fdi_rx_pll_enabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_rx_pll(i915, pipe, true);
}
118 
/* Assert that the PCH FDI RX PLL for @pipe is disabled. */
void assert_fdi_rx_pll_disabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_rx_pll(i915, pipe, false);
}
123 
/* Train the FDI link for @crtc via the platform specific hook. */
void intel_fdi_link_train(struct intel_crtc *crtc,
			  const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	dev_priv->display.funcs.fdi->fdi_link_train(crtc, crtc_state);
}
131 
132 /**
133  * intel_fdi_add_affected_crtcs - add CRTCs on FDI affected by other modeset CRTCs
134  * @state: intel atomic state
135  *
136  * Add a CRTC using FDI to @state if changing another CRTC's FDI BW usage is
137  * known to affect the available FDI BW for the former CRTC. In practice this
138  * means adding CRTC B on IVYBRIDGE if its use of FDI lanes is limited (by
139  * CRTC C) and CRTC C is getting disabled.
140  *
141  * Returns 0 in case of success, or a negative error code otherwise.
142  */
int intel_fdi_add_affected_crtcs(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state;
	const struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;

	/* Only the IVB 3 pipe configuration shares FDI lanes between pipes. */
	if (!IS_IVYBRIDGE(i915) || INTEL_NUM_PIPES(i915) != 3)
		return 0;

	/* Pipe C must already be in @state and undergoing a modeset. */
	crtc = intel_crtc_for_pipe(display, PIPE_C);
	new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
	if (!new_crtc_state)
		return 0;

	if (!intel_crtc_needs_modeset(new_crtc_state))
		return 0;

	/* Pipe C frees up FDI BW only if it was actually using FDI lanes. */
	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
	if (!old_crtc_state->fdi_lanes)
		return 0;

	/* Pull pipe B into @state so its FDI config can be recomputed. */
	crtc = intel_crtc_for_pipe(display, PIPE_B);
	new_crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
	if (IS_ERR(new_crtc_state))
		return PTR_ERR(new_crtc_state);

	/* Pipe B is only affected if it was using FDI lanes itself. */
	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
	if (!old_crtc_state->fdi_lanes)
		return 0;

	return intel_modeset_pipes_in_mask_early(state,
						 "FDI link BW decrease on pipe C",
						 BIT(PIPE_B));
}
179 
180 /* units of 100MHz */
pipe_required_fdi_lanes(struct intel_crtc_state * crtc_state)181 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
182 {
183 	if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
184 		return crtc_state->fdi_lanes;
185 
186 	return 0;
187 }
188 
/*
 * Validate the FDI lane count of @pipe_config for @pipe, taking into
 * account the lane sharing constraints with the other pipes.
 *
 * On failure *@pipe_to_reduce is set to the pipe whose link BW should be
 * reduced to make the configuration fit: @pipe itself, except when pipe
 * B's lane use blocks enabling pipe C on IVB, in which case it is PIPE_B.
 *
 * Returns 0 if the configuration is valid, a negative error code
 * otherwise.
 */
static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
			       struct intel_crtc_state *pipe_config,
			       enum pipe *pipe_to_reduce)
{
	struct intel_display *display = to_intel_display(dev);
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	/* By default the offending pipe is the one being checked. */
	*pipe_to_reduce = pipe;

	drm_dbg_kms(&dev_priv->drm,
		    "checking fdi config on pipe %c, lanes %i\n",
		    pipe_name(pipe), pipe_config->fdi_lanes);
	if (pipe_config->fdi_lanes > 4) {
		drm_dbg_kms(&dev_priv->drm,
			    "invalid fdi lane config on pipe %c: %i lanes\n",
			    pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		if (pipe_config->fdi_lanes > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "only 2 lanes on haswell, required: %i lanes\n",
				    pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	/* With only 2 pipes there is no lane sharing to worry about. */
	if (INTEL_NUM_PIPES(dev_priv) == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return 0;
	case PIPE_B:
		/* Pipe B may use more than 2 lanes only if pipe C uses none. */
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		other_crtc = intel_crtc_for_pipe(display, PIPE_C);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			drm_dbg_kms(&dev_priv->drm,
				    "invalid shared fdi lane config on pipe %c: %i lanes\n",
				    pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		/* Pipe C is always limited to 2 lanes. */
		if (pipe_config->fdi_lanes > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "only 2 lanes on pipe %c: required %i lanes\n",
				    pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		/* Pipe C can only be enabled if pipe B uses at most 2 lanes. */
		other_crtc = intel_crtc_for_pipe(display, PIPE_B);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "fdi link B uses too many lanes to enable link C\n");

			/* Pipe B is the one that must give up BW here. */
			*pipe_to_reduce = PIPE_B;

			return -EINVAL;
		}
		return 0;
	default:
		MISSING_CASE(pipe);
		return 0;
	}
}
274 
intel_fdi_pll_freq_update(struct drm_i915_private * i915)275 void intel_fdi_pll_freq_update(struct drm_i915_private *i915)
276 {
277 	if (IS_IRONLAKE(i915)) {
278 		u32 fdi_pll_clk =
279 			intel_de_read(i915, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
280 
281 		i915->display.fdi.pll_freq = (fdi_pll_clk + 2) * 10000;
282 	} else if (IS_SANDYBRIDGE(i915) || IS_IVYBRIDGE(i915)) {
283 		i915->display.fdi.pll_freq = 270000;
284 	} else {
285 		return;
286 	}
287 
288 	drm_dbg(&i915->drm, "FDI PLL freq=%d\n", i915->display.fdi.pll_freq);
289 }
290 
intel_fdi_link_freq(struct drm_i915_private * i915,const struct intel_crtc_state * pipe_config)291 int intel_fdi_link_freq(struct drm_i915_private *i915,
292 			const struct intel_crtc_state *pipe_config)
293 {
294 	if (HAS_DDI(i915))
295 		return pipe_config->port_clock; /* SPLL */
296 	else
297 		return i915->display.fdi.pll_freq;
298 }
299 
300 /**
301  * intel_fdi_compute_pipe_bpp - compute pipe bpp limited by max link bpp
302  * @crtc_state: the crtc state
303  *
304  * Compute the pipe bpp limited by the CRTC's maximum link bpp. Encoders can
305  * call this function during state computation in the simple case where the
306  * link bpp will always match the pipe bpp. This is the case for all non-DP
307  * encoders, while DP encoders will use a link bpp lower than pipe bpp in case
308  * of DSC compression.
309  *
310  * Returns %true in case of success, %false if pipe bpp would need to be
311  * reduced below its valid range.
312  */
intel_fdi_compute_pipe_bpp(struct intel_crtc_state * crtc_state)313 bool intel_fdi_compute_pipe_bpp(struct intel_crtc_state *crtc_state)
314 {
315 	int pipe_bpp = min(crtc_state->pipe_bpp,
316 			   fxp_q4_to_int(crtc_state->max_link_bpp_x16));
317 
318 	pipe_bpp = rounddown(pipe_bpp, 2 * 3);
319 
320 	if (pipe_bpp < 6 * 3)
321 		return false;
322 
323 	crtc_state->pipe_bpp = pipe_bpp;
324 
325 	return true;
326 }
327 
/*
 * Compute the FDI lane count and link M/N values for @pipe_config from
 * the adjusted mode clock and pipe bpp.
 *
 * Always returns 0; the lane count is validated separately by the
 * atomic FDI link BW check.
 */
int ilk_fdi_compute_config(struct intel_crtc *crtc,
			   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *i915 = to_i915(dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int lane, link_bw, fdi_dotclock;

	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(i915, pipe_config);

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ilk_get_lanes_required(fdi_dotclock, link_bw,
				      pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	/* Derive the data/link M/N values for the chosen lane count. */
	intel_link_compute_m_n(fxp_q4_from_int(pipe_config->pipe_bpp),
			       lane, fdi_dotclock,
			       link_bw,
			       intel_dp_bw_fec_overhead(false),
			       &pipe_config->fdi_m_n);

	return 0;
}
360 
intel_fdi_atomic_check_bw(struct intel_atomic_state * state,struct intel_crtc * crtc,struct intel_crtc_state * pipe_config,struct intel_link_bw_limits * limits)361 static int intel_fdi_atomic_check_bw(struct intel_atomic_state *state,
362 				     struct intel_crtc *crtc,
363 				     struct intel_crtc_state *pipe_config,
364 				     struct intel_link_bw_limits *limits)
365 {
366 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
367 	enum pipe pipe_to_reduce;
368 	int ret;
369 
370 	ret = ilk_check_fdi_lanes(&i915->drm, crtc->pipe, pipe_config,
371 				  &pipe_to_reduce);
372 	if (ret != -EINVAL)
373 		return ret;
374 
375 	ret = intel_link_bw_reduce_bpp(state, limits,
376 				       BIT(pipe_to_reduce),
377 				       "FDI link BW");
378 
379 	return ret ? : -EAGAIN;
380 }
381 
382 /**
383  * intel_fdi_atomic_check_link - check all modeset FDI link configuration
384  * @state: intel atomic state
385  * @limits: link BW limits
386  *
387  * Check the link configuration for all modeset FDI outputs. If the
388  * configuration is invalid @limits will be updated if possible to
389  * reduce the total BW, after which the configuration for all CRTCs in
390  * @state must be recomputed with the updated @limits.
391  *
392  * Returns:
 *   - 0 if the configuration is valid
394  *   - %-EAGAIN, if the configuration is invalid and @limits got updated
395  *     with fallback values with which the configuration of all CRTCs
396  *     in @state must be recomputed
397  *   - Other negative error, if the configuration is invalid without a
398  *     fallback possibility, or the check failed for another reason
399  */
intel_fdi_atomic_check_link(struct intel_atomic_state * state,struct intel_link_bw_limits * limits)400 int intel_fdi_atomic_check_link(struct intel_atomic_state *state,
401 				struct intel_link_bw_limits *limits)
402 {
403 	struct intel_crtc *crtc;
404 	struct intel_crtc_state *crtc_state;
405 	int i;
406 
407 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
408 		int ret;
409 
410 		if (!crtc_state->has_pch_encoder ||
411 		    !intel_crtc_needs_modeset(crtc_state) ||
412 		    !crtc_state->hw.enable)
413 			continue;
414 
415 		ret = intel_fdi_atomic_check_bw(state, crtc, crtc_state, limits);
416 		if (ret)
417 			return ret;
418 	}
419 
420 	return 0;
421 }
422 
/*
 * Set the FDI B/C lane bifurcation select in SOUTH_CHICKEN1 to @enable,
 * unless it is already in the requested state. FDI RX on pipes B and C
 * must be disabled while the bit is changed, hence the WARNs.
 */
static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
{
	u32 temp;

	temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
		    FDI_RX_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
		    FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
		    enable ? "en" : "dis");
	intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
	intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
}
447 
ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state * crtc_state)448 static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
449 {
450 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
451 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
452 
453 	switch (crtc->pipe) {
454 	case PIPE_A:
455 		break;
456 	case PIPE_B:
457 		if (crtc_state->fdi_lanes > 2)
458 			cpt_set_fdi_bc_bifurcation(dev_priv, false);
459 		else
460 			cpt_set_fdi_bc_bifurcation(dev_priv, true);
461 
462 		break;
463 	case PIPE_C:
464 		cpt_set_fdi_bc_bifurcation(dev_priv, true);
465 
466 		break;
467 	default:
468 		MISSING_CASE(crtc->pipe);
469 	}
470 }
471 
/*
 * Switch the FDI link of @crtc from a training pattern to the normal
 * (idle/pixel) state once link training has completed.
 */
void intel_fdi_normal_train(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (IS_IVYBRIDGE(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	intel_de_posting_read(dev_priv, reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev_priv))
		intel_de_rmw(dev_priv, reg, 0, FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
}
511 
512 /* The FDI link training functions for ILK/Ibexpeak. */
/*
 * Train the ILK/Ibexpeak FDI link for @crtc: enable TX/RX with training
 * pattern 1 and poll FDI_RX_IIR for bit lock, then switch to pattern 2
 * and poll for symbol lock.
 */
static void ilk_fdi_link_train(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/*
	 * Write the TU size bits before fdi link training, so that error
	 * detection works.
	 */
	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(dev_priv, PIPE_DATA_M1(dev_priv, pipe)) & TU_SIZE_MASK);

	/* FDI needs bits from pipe first */
	assert_transcoder_enabled(dev_priv, crtc_state->cpu_transcoder);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);
	intel_de_read(dev_priv, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable */
	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR);
	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN);

	/* Poll for bit lock, acking it by writing the bit back. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(dev_priv, reg);
		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			drm_dbg_kms(&dev_priv->drm, "FDI train 1 done.\n");
			intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
		     FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2);
	intel_de_rmw(dev_priv, FDI_RX_CTL(pipe),
		     FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2);
	intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
	udelay(150);

	/* Poll for symbol lock, acking it by writing the bit back. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(dev_priv, reg);
		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			intel_de_write(dev_priv, reg,
				       temp | FDI_RX_SYMBOL_LOCK);
			drm_dbg_kms(&dev_priv->drm, "FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(&dev_priv->drm, "FDI train done\n");

}
606 
/*
 * SNB-B vswing/pre-emphasis levels, stepped through in order by the
 * SNB and IVB link training loops below until the link locks.
 */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
613 
614 /* The FDI link training functions for SNB/Cougarpoint. */
/*
 * Train the SNB/Cougarpoint FDI link for @crtc: for training pattern 1
 * (bit lock) and then pattern 2 (symbol lock), walk the
 * vswing/pre-emphasis table, polling FDI_RX_IIR for the lock bits.
 */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/*
	 * Write the TU size bits before fdi link training, so that error
	 * detection works.
	 */
	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(dev_priv, PIPE_DATA_M1(dev_priv, pipe)) & TU_SIZE_MASK);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

	intel_de_write(dev_priv, FDI_RX_MISC(pipe),
		       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* Step through the vswing/pre-emphasis levels until bit lock. */
	for (i = 0; i < 4; i++) {
		intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
			     FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]);
		intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				/* ack the lock bit by writing it back */
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_SANDYBRIDGE(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* Same vswing/pre-emphasis walk, now polling for symbol lock. */
	for (i = 0; i < 4; i++) {
		intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
			     FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]);
		intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_SYMBOL_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
}
749 
750 /* Manual link training for Ivy Bridge A0 parts */
/*
 * Manually train the IVB FDI link for @crtc: try each vswing/pre-emphasis
 * level twice, fully disabling and re-enabling TX/RX between attempts,
 * polling FDI_RX_IIR for bit lock (pattern 1) and symbol lock (pattern 2).
 */
static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
				      const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	ivb_update_fdi_bc_bifurcation(crtc_state);

	/*
	 * Write the TU size bits before fdi link training, so that error
	 * detection works.
	 */
	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(dev_priv, PIPE_DATA_M1(dev_priv, pipe)) & TU_SIZE_MASK);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR before link train 0x%x\n",
		    intel_de_read(dev_priv, FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		intel_de_write(dev_priv, reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		intel_de_write(dev_priv, reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

		intel_de_write(dev_priv, FDI_RX_MISC(pipe),
			       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

		intel_de_posting_read(dev_priv, reg);
		udelay(1); /* should be 0.5us */

		/* Poll for bit lock, acking it by writing the bit back. */
		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_BIT_LOCK ||
			    (intel_de_read(dev_priv, reg) & FDI_RX_BIT_LOCK)) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 1 done, level %i.\n",
					    i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			drm_dbg_kms(&dev_priv->drm,
				    "FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
			     FDI_LINK_TRAIN_NONE_IVB,
			     FDI_LINK_TRAIN_PATTERN_2_IVB);
		intel_de_rmw(dev_priv, FDI_RX_CTL(pipe),
			     FDI_LINK_TRAIN_PATTERN_MASK_CPT,
			     FDI_LINK_TRAIN_PATTERN_2_CPT);
		intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
		udelay(2); /* should be 1.5us */

		/* Poll for symbol lock, acking it by writing the bit back. */
		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (intel_de_read(dev_priv, reg) & FDI_RX_SYMBOL_LOCK)) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_SYMBOL_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 2 done, level %i.\n",
					    i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			drm_dbg_kms(&dev_priv->drm,
				    "FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
}
878 
879 /* Starting with Haswell, different DDI ports can work in FDI mode for
880  * connection to the PCH-located connectors. For this, it is necessary to train
881  * both the DDI port and PCH receiver for the desired DDI buffer settings.
882  *
883  * The recommended port to work in FDI mode is DDI E, which we use here. Also,
884  * please note that when FDI mode is active on DDI E, it shares 2 lines with
885  * DDI A (which is used for eDP)
886  */
void hsw_fdi_link_train(struct intel_encoder *encoder,
			const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 temp, i, rx_ctl_val;
	int n_entries;

	/*
	 * Number of DDI buffer translation (voltage swing / emphasis)
	 * entries available; the training loop below tries each one twice.
	 */
	encoder->get_buf_trans(encoder, crtc_state, &n_entries);

	hsw_prepare_dp_ddi_buffers(encoder, crtc_state);

	/* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the
	 * mode set "sequence for CRT port" document:
	 * - TP1 to TP2 time with the default value
	 * - FDI delay to 90h
	 *
	 * WaFDIAutoLinkSetTimingOverrride:hsw
	 */
	intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A),
		       FDI_RX_PWRDN_LANE1_VAL(2) |
		       FDI_RX_PWRDN_LANE0_VAL(2) |
		       FDI_RX_TP1_TO_TP2_48 |
		       FDI_RX_FDI_DELAY_90);

	/* Enable the PCH Receiver FDI PLL */
	rx_ctl_val = dev_priv->display.fdi.rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
		     FDI_RX_PLL_ENABLE |
		     FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
	intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
	udelay(220);

	/* Switch from Rawclk to PCDclk */
	rx_ctl_val |= FDI_PCDCLK;
	intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);

	/* Configure Port Clock Select */
	drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll->info->id != DPLL_ID_SPLL);
	intel_ddi_enable_clock(encoder, crtc_state);

	/* Start the training iterating through available voltages and emphasis,
	 * testing each value twice. */
	for (i = 0; i < n_entries * 2; i++) {
		/* Configure DP_TP_CTL with auto-training */
		intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
			       DP_TP_CTL_FDI_AUTOTRAIN |
			       DP_TP_CTL_ENHANCED_FRAME_ENABLE |
			       DP_TP_CTL_LINK_TRAIN_PAT1 |
			       DP_TP_CTL_ENABLE);

		/* Configure and enable DDI_BUF_CTL for DDI E with next voltage.
		 * DDI E does not support port reversal, the functionality is
		 * achieved on the PCH side in FDI_RX_CTL, so no need to set the
		 * port reversal bit */
		intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E),
			       DDI_BUF_CTL_ENABLE |
			       ((crtc_state->fdi_lanes - 1) << 1) |
			       DDI_BUF_TRANS_SELECT(i / 2));
		intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));

		udelay(600);

		/* Program PCH FDI Receiver TU */
		intel_de_write(dev_priv, FDI_RX_TUSIZE1(PIPE_A), TU_SIZE(64));

		/* Enable PCH FDI Receiver with auto-training */
		rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
		intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
		intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));

		/* Wait for FDI receiver lane calibration */
		udelay(30);

		/* Unset FDI_RX_MISC pwrdn lanes */
		intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
			     FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK, 0);
		intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));

		/* Wait for FDI auto training time */
		udelay(5);

		temp = intel_de_read(dev_priv, DP_TP_STATUS(PORT_E));
		if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
			drm_dbg_kms(&dev_priv->drm,
				    "FDI link training done on step %d\n", i);
			break;
		}

		/*
		 * Leave things enabled even if we failed to train FDI.
		 * Results in less fireworks from the state checker.
		 */
		if (i == n_entries * 2 - 1) {
			drm_err(&dev_priv->drm, "FDI link training failed!\n");
			break;
		}

		/* Training failed on this entry: tear down RX/DDI/DP_TP and retry. */
		rx_ctl_val &= ~FDI_RX_ENABLE;
		intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
		intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));

		intel_de_rmw(dev_priv, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0);
		intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));

		/* Disable DP_TP_CTL and FDI_RX_CTL and retry */
		intel_de_rmw(dev_priv, DP_TP_CTL(PORT_E), DP_TP_CTL_ENABLE, 0);
		intel_de_posting_read(dev_priv, DP_TP_CTL(PORT_E));

		intel_wait_ddi_buf_idle(dev_priv, PORT_E);

		/* Reset FDI_RX_MISC pwrdn lanes */
		intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
			     FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK,
			     FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2));
		intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
	}

	/* Enable normal pixel sending for FDI */
	intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
		       DP_TP_CTL_FDI_AUTOTRAIN |
		       DP_TP_CTL_LINK_TRAIN_NORMAL |
		       DP_TP_CTL_ENHANCED_FRAME_ENABLE |
		       DP_TP_CTL_ENABLE);
}
1012 
/*
 * Tear down the HSW FDI link brought up by hsw_fdi_link_train():
 * disable the PCH FDI receiver and DDI E buffer, drop the port clock,
 * restore the pwrdn lane values and shut down the RX clocking/PLL.
 * The step order follows the Bspec sequence (see comment below).
 */
void hsw_fdi_disable(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	/*
	 * Bspec lists this as both step 13 (before DDI_BUF_CTL disable)
	 * and step 18 (after clearing PORT_CLK_SEL). Based on a BUN,
	 * step 13 is the correct place for it. Step 18 is where it was
	 * originally before the BUN.
	 */
	intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_RX_ENABLE, 0);
	intel_de_rmw(dev_priv, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0);
	intel_wait_ddi_buf_idle(dev_priv, PORT_E);
	intel_ddi_disable_clock(encoder);
	/* Restore the pwrdn lane values programmed at link-train time. */
	intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
		     FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK,
		     FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2));
	/* Switch the receiver back from PCDclk to Rawclk, then stop its PLL. */
	intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_PCDCLK, 0);
	intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_RX_PLL_ENABLE, 0);
}
1033 
/*
 * Enable the FDI PLLs for the crtc's pipe on ILK-style platforms:
 * first the PCH receiver PLL (with lane count and BPC programmed),
 * then the CPU transmitter PLL.
 */
void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	/* 0x7 << 16 is the FDI RX BPC field; keep it consistent with TRANSCONF */
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp |= (intel_de_read(dev_priv, TRANSCONF(dev_priv, pipe)) & TRANSCONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	intel_de_rmw(dev_priv, reg, 0, FDI_PCDCLK);
	intel_de_posting_read(dev_priv, reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		intel_de_write(dev_priv, reg, temp | FDI_TX_PLL_ENABLE);

		intel_de_posting_read(dev_priv, reg);
		udelay(100);
	}
}
1068 
ilk_fdi_pll_disable(struct intel_crtc * crtc)1069 void ilk_fdi_pll_disable(struct intel_crtc *crtc)
1070 {
1071 	struct drm_device *dev = crtc->base.dev;
1072 	struct drm_i915_private *dev_priv = to_i915(dev);
1073 	enum pipe pipe = crtc->pipe;
1074 
1075 	/* Switch from PCDclk to Rawclk */
1076 	intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), FDI_PCDCLK, 0);
1077 
1078 	/* Disable CPU FDI TX PLL */
1079 	intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_TX_PLL_ENABLE, 0);
1080 	intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
1081 	udelay(100);
1082 
1083 	/* Wait for the clocks to turn off. */
1084 	intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), FDI_RX_PLL_ENABLE, 0);
1085 	intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
1086 	udelay(100);
1087 }
1088 
/*
 * Disable the FDI link for the crtc's pipe: turn off the CPU FDI
 * transmitter and PCH FDI receiver, and leave both sides parked in
 * training pattern 1 with BPC matching TRANSCONF.
 */
void ilk_fdi_disable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_TX_ENABLE, 0);
	intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	/* 0x7 << 16 is the FDI RX BPC field */
	temp &= ~(0x7 << 16);
	temp |= (intel_de_read(dev_priv, TRANSCONF(dev_priv, pipe)) & TRANSCONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev_priv))
		intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
			       FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
		     FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_1);

	/* CPT and IBX PCHs encode the training pattern bits differently. */
	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in TRANSCONF */
	temp &= ~(0x07 << 16);
	temp |= (intel_de_read(dev_priv, TRANSCONF(dev_priv, pipe)) & TRANSCONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);
}
1135 
/*
 * Per-platform FDI link training vtables; one of these is selected
 * by intel_fdi_init_hook() based on the platform.
 */
static const struct intel_fdi_funcs ilk_funcs = {
	.fdi_link_train = ilk_fdi_link_train,
};

static const struct intel_fdi_funcs gen6_funcs = {
	.fdi_link_train = gen6_fdi_link_train,
};

static const struct intel_fdi_funcs ivb_funcs = {
	.fdi_link_train = ivb_manual_fdi_link_train,
};
1147 
1148 void
intel_fdi_init_hook(struct drm_i915_private * dev_priv)1149 intel_fdi_init_hook(struct drm_i915_private *dev_priv)
1150 {
1151 	if (IS_IRONLAKE(dev_priv)) {
1152 		dev_priv->display.funcs.fdi = &ilk_funcs;
1153 	} else if (IS_SANDYBRIDGE(dev_priv)) {
1154 		dev_priv->display.funcs.fdi = &gen6_funcs;
1155 	} else if (IS_IVYBRIDGE(dev_priv)) {
1156 		/* FIXME: detect B0+ stepping and use auto training */
1157 		dev_priv->display.funcs.fdi = &ivb_funcs;
1158 	}
1159 }
1160